| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
class Node:
    """Node of a binary search tree."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert a value into the subtree rooted at this node."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal; appends values to `res` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a list by inserting its items into a binary search tree and reading the tree back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| code_codestyle: 344 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
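# Illustrative usage (added sketch, not part of this file): `load_tool` exists in
# the transformers agents API, but the exact task identifier below is an
# assumption, and `image` is a hypothetical PIL image.
#
#   from transformers import load_tool
#   tool = load_tool("image-question-answering")
#   answer = tool(image, "How many cats are in the picture?")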
| style_context_codestyle: 344 | label: 1 |
"""simple docstring"""
import math
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
return math.pow(__UpperCamelCase , 2 ) - a
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return 2 * x
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = 2.0
while start <= a:
__A = math.pow(__UpperCamelCase , 2 )
return start
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 9_9_9_9 , __UpperCamelCase = 0.00000000000001 ):
"""simple docstring"""
if a < 0:
raise ValueError('''math domain error''' )
__A = get_initial_point(__UpperCamelCase )
for _ in range(__UpperCamelCase ):
__A = value
__A = value - fx(__UpperCamelCase , __UpperCamelCase ) / fx_derivative(__UpperCamelCase )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
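    # Added sketch (not in the original file): spot-check the Newton iteration
    # against math.sqrt; the two should agree to well within the tolerance.
    for sample in (2.0, 10.0, 3000.25):
        assert abs(square_root_iterative(sample) - math.sqrt(sample)) < 1e-9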
| code_codestyle: 215 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
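# Example invocation (added sketch): this module backs the `accelerate config`
# subcommand, which walks through the prompts defined above, e.g.:
#   accelerate config
#   accelerate config --config_file ./my_config.yaml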
| style_context_codestyle: 215 | label: 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( UpperCamelCase_ ):
snake_case__ = (UnCLIPScheduler,)
def __UpperCamelCase ( self : str ,**a__ : Dict) -> str:
"""simple docstring"""
_lowerCAmelCase:Any = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**a__)
return config
def __UpperCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=a__)
def __UpperCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=a__)
def __UpperCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a__)
def __UpperCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=a__)
def __UpperCamelCase ( self : str) -> Dict:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=a__)
def __UpperCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=a__ ,prev_timestep=a__)
def __UpperCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
_lowerCAmelCase:List[Any] = self.scheduler_classes[0]
_lowerCAmelCase:str = self.get_scheduler_config(variance_type='''fixed_small_log''')
_lowerCAmelCase:str = scheduler_class(**a__)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0_000E-10)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1E-5
def __UpperCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Dict = self.scheduler_classes[0]
_lowerCAmelCase:Union[str, Any] = self.get_scheduler_config(variance_type='''learned_range''')
_lowerCAmelCase:List[Any] = scheduler_class(**a__)
_lowerCAmelCase:str = 0.5
assert scheduler._get_variance(1 ,predicted_variance=a__) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 ,predicted_variance=a__) - -5.7998052 < 1E-5
assert scheduler._get_variance(999 ,predicted_variance=a__) - -0.0010011 < 1E-5
def __UpperCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_lowerCAmelCase:str = self.scheduler_classes[0]
_lowerCAmelCase:Any = self.get_scheduler_config()
_lowerCAmelCase:List[Any] = scheduler_class(**a__)
_lowerCAmelCase:List[Any] = scheduler.timesteps
_lowerCAmelCase:Optional[Any] = self.dummy_model()
_lowerCAmelCase:Tuple = self.dummy_sample_deter
_lowerCAmelCase:Optional[Any] = torch.manual_seed(0)
for i, t in enumerate(a__):
# 1. predict noise residual
_lowerCAmelCase:Dict = model(a__ ,a__)
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase:str = scheduler.step(a__ ,a__ ,a__ ,generator=a__).prev_sample
_lowerCAmelCase:str = pred_prev_sample
_lowerCAmelCase:Dict = torch.sum(torch.abs(a__))
_lowerCAmelCase:Optional[Any] = torch.mean(torch.abs(a__))
assert abs(result_sum.item() - 252.2682495) < 1E-2
assert abs(result_mean.item() - 0.3284743) < 1E-3
def __UpperCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Any = self.scheduler_classes[0]
_lowerCAmelCase:List[Any] = self.get_scheduler_config()
_lowerCAmelCase:Dict = scheduler_class(**a__)
scheduler.set_timesteps(25)
_lowerCAmelCase:Any = scheduler.timesteps
_lowerCAmelCase:Any = self.dummy_model()
_lowerCAmelCase:int = self.dummy_sample_deter
_lowerCAmelCase:List[Any] = torch.manual_seed(0)
for i, t in enumerate(a__):
# 1. predict noise residual
_lowerCAmelCase:List[Any] = model(a__ ,a__)
if i + 1 == timesteps.shape[0]:
_lowerCAmelCase:Tuple = None
else:
_lowerCAmelCase:Optional[int] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase:List[Any] = scheduler.step(
a__ ,a__ ,a__ ,prev_timestep=a__ ,generator=a__).prev_sample
_lowerCAmelCase:Optional[int] = pred_prev_sample
_lowerCAmelCase:Dict = torch.sum(torch.abs(a__))
_lowerCAmelCase:List[str] = torch.mean(torch.abs(a__))
assert abs(result_sum.item() - 258.2044983) < 1E-2
assert abs(result_mean.item() - 0.3362038) < 1E-3
def __UpperCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
pass
| code_codestyle: 227 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class a__ ( UpperCamelCase_ ):
def __init__( self : Optional[Any] ,a__ : Union[List[ControlNetModel], Tuple[ControlNetModel]]) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_lowerCAmelCase:Dict = nn.ModuleList(a__)
def __UpperCamelCase ( self : Optional[Any] ,a__ : torch.FloatTensor ,a__ : Union[torch.Tensor, float, int] ,a__ : torch.Tensor ,a__ : List[torch.tensor] ,a__ : List[float] ,a__ : Optional[torch.Tensor] = None ,a__ : Optional[torch.Tensor] = None ,a__ : Optional[torch.Tensor] = None ,a__ : Optional[Dict[str, Any]] = None ,a__ : bool = False ,a__ : bool = True ,) -> Union[ControlNetOutput, Tuple]:
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a__ ,a__ ,self.nets)):
_lowerCAmelCase , _lowerCAmelCase:Dict = controlnet(
a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,)
# merge samples
if i == 0:
_lowerCAmelCase , _lowerCAmelCase:Any = down_samples, mid_sample
else:
_lowerCAmelCase:str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a__ ,a__)
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCamelCase ( self : str ,a__ : Union[str, os.PathLike] ,a__ : bool = True ,a__ : Callable = None ,a__ : bool = False ,a__ : Optional[str] = None ,) -> Dict:
"""simple docstring"""
_lowerCAmelCase:List[Any] = 0
_lowerCAmelCase:Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a__ ,is_main_process=a__ ,save_function=a__ ,safe_serialization=a__ ,variant=a__ ,)
idx += 1
_lowerCAmelCase:int = model_path_to_save + F'_{idx}'
@classmethod
def __UpperCamelCase ( cls : Tuple ,a__ : Optional[Union[str, os.PathLike]] ,**a__ : int) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:List[str] = 0
_lowerCAmelCase:Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_lowerCAmelCase:str = pretrained_model_path
while os.path.isdir(a__):
_lowerCAmelCase:Tuple = ControlNetModel.from_pretrained(a__ ,**a__)
controlnets.append(a__)
idx += 1
_lowerCAmelCase:Optional[int] = pretrained_model_path + F'_{idx}'
logger.info(F'{len(a__)} controlnets loaded from {pretrained_model_path}.')
if len(a__) == 0:
raise ValueError(
F'No ControlNets found under {os.path.dirname(a__)}. Expected at least {pretrained_model_path + "_0"}.')
return cls(a__)
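# Illustrative sketch (added, not from the original file) of the save/load
# layout the comments above describe; `controlnet_a` and `controlnet_b` are
# hypothetical `ControlNetModel` instances.
#
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./mydirectory/controlnet")
#   # writes ./mydirectory/controlnet and ./mydirectory/controlnet_1
#   reloaded = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")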
| style_context_codestyle: 227 | label: 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad the 2D points and labels to the maximum number of points in the batch."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False):
        """Rescale coordinates from the original image size to the preprocessed size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        """Validate the prompt inputs and convert them to lists of numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| code_codestyle: 503 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph data structure that supports both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex to destination_vertex, creating missing vertices; returns self so calls can be chained."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
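# Illustrative usage (added sketch, not part of the original module): build a
# small directed and an undirected graph and inspect their adjacency lists.
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList()  # directed by default
    directed_graph.add_edge(0, 1).add_edge(1, 2)  # add_edge returns self, so calls chain
    print(directed_graph)  # {0: [1], 1: [2], 2: []}

    undirected_graph = GraphAdjacencyList(directed=False)
    undirected_graph.add_edge("a", "b")
    print(undirected_graph)  # {'a': ['b'], 'b': ['a']}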
| style_context_codestyle: 503 | label: 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
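# Illustrative usage (added sketch, not part of this file; the checkpoint id is
# an assumption based on published LDM super-resolution weights):
#
#   from PIL import Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = Image.open("input.png").convert("RGB")
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")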
| code_codestyle: 170 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| style_context_codestyle: 170 | label: 1 |
def binary_and(a: int, b: int) -> str:
    """
    Take in two positive integers and return a binary string that is the
    bitwise "and" of the two.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(5, 3)
    '0b001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 710 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| style_context_codestyle: 196 | label: 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| code_codestyle: 78 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # map HF Diffusers key names to stable-diffusion key names
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights by adding two trailing unit dims
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
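# Example invocation (added sketch; the script filename and paths below are
# hypothetical placeholders):
#   python convert_diffusers_to_sd.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors \
#       --use_safetensors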
| style_context_codestyle: 78 | label: 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __snake_case :
def __init__( self , _A , _A = 13 , _A = 64 , _A = 2 , _A = 3 , _A = 3 , _A = True , _A = True , _A = 128 , _A=[16, 32, 64, 128] , _A = 7 , _A = 4 , _A = 37 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 10 , _A = 0.0_2 , _A = 2 , _A = 1 , _A = 128 , _A = [2, 2, 2, 2] , _A = 2 , _A = 2 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
SCREAMING_SNAKE_CASE_ = num_attention_outputs
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = embed_dim + 1
SCREAMING_SNAKE_CASE_ = resolution
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = dim
SCREAMING_SNAKE_CASE_ = mlp_expansion_ratio
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase__ ( self , _A , _A , _A):
SCREAMING_SNAKE_CASE_ = TFEfficientFormerModel(config=_A)
SCREAMING_SNAKE_CASE_ = model(_A , training=_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase__ ( self , _A , _A , _A):
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification(_A)
SCREAMING_SNAKE_CASE_ = model(_A , labels=_A , training=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification(_A)
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ = model(_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : int = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__lowerCAmelCase : int = False
__lowerCAmelCase : int = False
__lowerCAmelCase : int = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : Union[str, Any] = False
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = TFEfficientFormerModelTester(self)
SCREAMING_SNAKE_CASE_ = ConfigTester(
self , config_class=_A , has_text_modality=_A , hidden_size=37)
def lowerCAmelCase__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def lowerCAmelCase__ ( self):
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def lowerCAmelCase__ ( self):
pass
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_A)
SCREAMING_SNAKE_CASE_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A)
def lowerCAmelCase__ ( self):
def check_hidden_states_output(_A , _A , _A):
SCREAMING_SNAKE_CASE_ = model_class(_A)
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A) , training=_A)
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
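# num_hidden_layers + 1 because the embedding output is returned as the first hidden state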
self.assertEqual(len(_A) , _A)
if hasattr(self.model_tester , 'encoder_seq_length'):
SCREAMING_SNAKE_CASE_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE_ = seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ = outputs.decoder_hidden_states
self.assertIsInstance(_A , (list, tuple))
self.assertEqual(len(_A) , _A)
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'seq_length' , _A)
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'decoder_seq_length' , _A)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A)
def lowerCAmelCase__ ( self , _A , _A , _A=False):
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(_A , _A , return_labels=_A)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A)
@slow
def lowerCAmelCase__ ( self):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = TFEfficientFormerModel.from_pretrained(_A)
self.assertIsNotNone(_A)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'seq_length' , _A)
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'encoder_seq_length' , _A)
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'key_length' , _A)
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'chunk_length' , _A)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
SCREAMING_SNAKE_CASE_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_A)
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A) , training=_A)
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_A)
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A) , training=_A)
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__ ( self):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE_ = model_class(_A)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_A)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE_ = model(_A)
self.assertTrue(outputs_dict is not None)
def _UpperCAmelCase ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='tf')
# forward pass
SCREAMING_SNAKE_CASE_ = model(**_A , training=_A)
# verify the logits
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , _A)
SCREAMING_SNAKE_CASE_ = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2])
self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4))
@slow
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='tf')
# forward pass
SCREAMING_SNAKE_CASE_ = model(**_A , training=_A)
# verify the logits
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , _A)
SCREAMING_SNAKE_CASE_ = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9])
self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4))
| 620
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __snake_case ( lowerCAmelCase__ ):
__lowerCAmelCase : Optional[int] = 'dpr'
def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ):
super().__init__(pad_token_id=_A , **_A)
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = position_embedding_type
| 620
| 1
|
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
assert x is not None
assert y is not None
SCREAMING_SNAKE_CASE__ = len(lowercase_ )
SCREAMING_SNAKE_CASE__ = len(lowercase_ )
# declaring the array for storing the dp values
SCREAMING_SNAKE_CASE__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
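# l[i][j] holds the LCS length of the prefixes x[:i] and y[:j]; row and column 0 stay 0 (empty prefix)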
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ = 1 if x[i - 1] == y[j - 1] else 0
SCREAMING_SNAKE_CASE__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = m, n
while i > 0 and j > 0:
SCREAMING_SNAKE_CASE__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
SCREAMING_SNAKE_CASE__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = '''AGGTAB'''
_SCREAMING_SNAKE_CASE : List[str] = '''GXTXAYB'''
_SCREAMING_SNAKE_CASE : Optional[int] = 4
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''GTAB'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 493
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
lowerCAmelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
_lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
_lowerCAmelCase : Optional[int] = MBartTokenizer
_lowerCAmelCase : List[int] = []
_lowerCAmelCase : List[int] = []
def __init__( self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# Mask token behaves like a normal word, i.e. includes the space before it
__UpperCAmelCase : Any = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
__UpperCAmelCase : Optional[Any] = vocab_file
__UpperCAmelCase : int = False if not self.vocab_file else True
__UpperCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
__UpperCAmelCase : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(lowercase__) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__UpperCAmelCase : Any = src_lang if src_lang is not None else '''en_XX'''
__UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang)
__UpperCAmelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def A( self):
return self._src_lang
@src_lang.setter
def A( self , lowercase__):
__UpperCAmelCase : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def A( self , lowercase__ , lowercase__ = None):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
__UpperCAmelCase : Optional[Any] = src_lang
__UpperCAmelCase : Union[str, Any] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__)
__UpperCAmelCase : int = self.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : Tuple = tgt_lang_id
return inputs
def A( self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
__UpperCAmelCase : Any = src_lang
__UpperCAmelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__)
def A( self):
return self.set_src_lang_special_tokens(self.src_lang)
def A( self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def A( self , lowercase__):
__UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
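# mBART convention: no prefix tokens; the sequence ends with </s> followed by the language code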
__UpperCAmelCase : int = self.convert_ids_to_tokens(self.prefix_tokens)
__UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
__UpperCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def A( self , lowercase__):
__UpperCAmelCase : List[str] = self.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : List[Any] = [self.eos_token_id, self.cur_lang_code]
__UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.prefix_tokens)
__UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.suffix_tokens)
__UpperCAmelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def A( self , lowercase__ , lowercase__ = None):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(lowercase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
__UpperCAmelCase : List[Any] = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase__):
copyfile(self.vocab_file , lowercase__)
return (out_vocab_file,)
| 462
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 438
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase_ : List[Any] = 128
elif "12-12" in model_name:
lowercase_ : Tuple = 12
lowercase_ : List[Any] = 12
elif "14-14" in model_name:
lowercase_ : List[str] = 14
lowercase_ : Optional[Any] = 14
elif "16-16" in model_name:
lowercase_ : Union[str, Any] = 16
lowercase_ : List[str] = 16
else:
raise ValueError('Model not supported' )
lowercase_ : Optional[Any] = 'huggingface/label-files'
if "speech-commands" in model_name:
lowercase_ : List[str] = 35
lowercase_ : int = 'speech-commands-v2-id2label.json'
else:
lowercase_ : Union[str, Any] = 527
lowercase_ : int = 'audioset-id2label.json'
lowercase_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowercase_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase_ : Optional[int] = idalabel
lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
if "module.v" in name:
lowercase_ : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowercase_ : Optional[Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowercase_ : Any = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowercase_ : List[str] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase_ : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowercase_ : Optional[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase_ : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : int = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase_ : int = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowercase_ : Dict = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowercase_ : List[Any] = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for key in orig_state_dict.copy().keys():
lowercase_ : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
lowercase_ : List[str] = key.split('.' )
lowercase_ : int = int(key_split[3] )
lowercase_ : Tuple = config.hidden_size
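# the checkpoint stores attention as one fused qkv matrix of shape (3 * dim, dim);
# slice it into equal thirds for the query, key and value projections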
if "weight" in key:
lowercase_ : Tuple = val[:dim, :]
lowercase_ : Union[str, Any] = val[dim : dim * 2, :]
lowercase_ : Optional[int] = val[-dim:, :]
else:
lowercase_ : Optional[Any] = val[:dim]
lowercase_ : Any = val[dim : dim * 2]
lowercase_ : Tuple = val[-dim:]
else:
lowercase_ : Optional[Any] = val
return orig_state_dict
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
lowercase_ : Dict = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
lowercase_ : Optional[int] = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
lowercase_ : Dict = model_name_to_url[model_name]
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
lowercase_ : str = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
lowercase_ : Optional[Any] = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase_ : Tuple = -4.267_7393 if 'speech-commands' not in model_name else -6.84_5978
lowercase_ : str = 4.568_9974 if 'speech-commands' not in model_name else 5.565_4526
lowercase_ : str = 1_024 if 'speech-commands' not in model_name else 128
lowercase_ : Dict = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
lowercase_ : Optional[Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' )
lowercase_ : Any = dataset[0]['audio']['array']
else:
lowercase_ : Any = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
lowercase_ ,lowercase_ : Union[str, Any] = torchaudio.load(SCREAMING_SNAKE_CASE_ )
lowercase_ : str = waveform.squeeze().numpy()
lowercase_ : str = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=16_000 , return_tensors='pt' )
# forward pass
lowercase_ : Tuple = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase_ : int = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase_ : Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase_ : Optional[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase_ : List[str] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase_ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase_ : Any = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase_ : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase_ : Optional[Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 438
| 1
|
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def UpperCAmelCase ( snake_case : Optional[Any] ):
_lowerCAmelCase:Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
_lowerCAmelCase:Tuple = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , snake_case )
if matches:
_lowerCAmelCase:Optional[int] = float(matches[1] )
_lowerCAmelCase:str = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
_lowerCAmelCase:str = 1001
_lowerCAmelCase:Optional[Any] = '''imagenet-1k-id2label.json'''
_lowerCAmelCase:Union[str, Any] = '''huggingface/label-files'''
_lowerCAmelCase:Any = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
_lowerCAmelCase:List[Any] = {int(snake_case ) + 1: v for k, v in idalabel.items()}
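# shift every ImageNet id up by one so that index 0 is free for the extra "background" class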
_lowerCAmelCase:Optional[Any] = '''background'''
_lowerCAmelCase:int = idalabel
_lowerCAmelCase:Optional[int] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( ):
_lowerCAmelCase:int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCAmelCase:Any = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( snake_case : Optional[Any] , snake_case : int , snake_case : Dict , snake_case : Optional[int]=False ):
_lowerCAmelCase:Dict = get_mobilenet_va_config(snake_case )
# Load 🤗 model
_lowerCAmelCase:List[Any] = MobileNetVaForImageClassification(snake_case ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(snake_case , snake_case , snake_case )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_lowerCAmelCase:Optional[Any] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
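# resize the short edge to image_size + 32, then center-crop to image_size (the usual eval-time preprocessing)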
_lowerCAmelCase:Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCAmelCase:Optional[Any] = model(**snake_case )
_lowerCAmelCase:Any = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
_lowerCAmelCase:Union[str, Any] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
_lowerCAmelCase:Union[str, Any] = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
_lowerCAmelCase:List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , snake_case , atol=1e-4 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing to the hub...''' )
_lowerCAmelCase:int = '''google/''' + model_name
image_processor.push_to_hub(snake_case )
model.push_to_hub(snake_case )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 227
|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def UpperCAmelCase ( snake_case : str ):
if "model" in orig_key:
_lowerCAmelCase:str = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
_lowerCAmelCase:List[Any] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
_lowerCAmelCase:Any = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
_lowerCAmelCase:Tuple = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
_lowerCAmelCase:Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
_lowerCAmelCase:Optional[int] = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
_lowerCAmelCase:Dict = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
_lowerCAmelCase:Union[str, Any] = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
_lowerCAmelCase:int = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
_lowerCAmelCase:Tuple = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
_lowerCAmelCase:str = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
_lowerCAmelCase:List[Any] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
_lowerCAmelCase:int = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
_lowerCAmelCase:Any = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
_lowerCAmelCase:Optional[Any] = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
_lowerCAmelCase:str = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
_lowerCAmelCase:str = '''yoso.''' + orig_key
return orig_key
def UpperCAmelCase ( snake_case : Dict , snake_case : List[Any] ):
for key in orig_state_dict.copy().keys():
_lowerCAmelCase:Any = orig_state_dict.pop(snake_case )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCAmelCase:Optional[Any] = val
_lowerCAmelCase:Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
_lowerCAmelCase:Union[str, Any] = torch.arange(snake_case ).expand((1, -1) ) + 2
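# position ids start at 2, presumably following the RoBERTa convention of offsetting past the padding index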
return orig_state_dict
def UpperCAmelCase ( snake_case : Any , snake_case : Union[str, Any] , snake_case : List[Any] ):
_lowerCAmelCase:str = torch.load(snake_case , map_location='''cpu''' )['''model_state_dict''']
_lowerCAmelCase:List[str] = YosoConfig.from_json_file(snake_case )
_lowerCAmelCase:Optional[int] = YosoForMaskedLM(snake_case )
_lowerCAmelCase:Tuple = convert_checkpoint_helper(config.max_position_embeddings , snake_case )
print(model.load_state_dict(snake_case ) )
model.eval()
model.save_pretrained(snake_case )
print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 227
| 1
|
def lowerCAmelCase( __lowerCamelCase = 6008_5147_5143 ):
try:
__a = int(__lowerCamelCase )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
__a = 1
__a = 2
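# trial division: divide out each factor i completely; the last divisor found
# (or the remaining n, if it exceeds 1) is the largest prime factor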
while i * i <= n:
while n % i == 0:
__a = i
n //= i
i += 1
if n > 1:
__a = n
return int(__lowerCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 721
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase_ : List[Any] = random.Random()
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=1.0 , __lowerCamelCase=None , __lowerCamelCase=None ):
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class a__ ( unittest.TestCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=4_0_0 , UpperCAmelCase=2_0_0_0 , UpperCAmelCase=1 , UpperCAmelCase=0.0 , UpperCAmelCase=1_6_0_0_0 , UpperCAmelCase=True , UpperCAmelCase=8_0 , UpperCAmelCase=1_6 , UpperCAmelCase=6_4 , UpperCAmelCase="hann_window" , UpperCAmelCase=8_0 , UpperCAmelCase=7_6_0_0 , UpperCAmelCase=1e-10 , UpperCAmelCase=True , ) -> Tuple:
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
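# step between consecutive sample lengths so the batch spans min_seq_length..max_seq_length evenly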
__a = feature_size
__a = padding_value
__a = sampling_rate
__a = do_normalize
__a = num_mel_bins
__a = hop_length
__a = win_length
__a = win_function
__a = fmin
__a = fmax
__a = mel_floor
__a = return_attention_mask
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase=False , UpperCAmelCase=False ) -> Optional[int]:
def _flatten(UpperCAmelCase ):
return list(itertools.chain(*UpperCAmelCase ) )
if equal_length:
__a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase=False , UpperCAmelCase=False ) -> List[Any]:
if equal_length:
__a = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class a__ ( __snake_case , unittest.TestCase ):
A__ : Tuple = SpeechTaFeatureExtractor
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = SpeechTaFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[Any]:
self.assertTrue(np.all(np.mean(UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = [np.asarray(UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
__a = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test batched
__a = feat_extract(UpperCAmelCase , return_tensors='np' ).input_values
__a = feat_extract(UpperCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = ['longest', 'max_length', 'do_not_pad']
__a = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
__a = feat_extract(UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='np' )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = range(8_0_0 , 1_4_0_0 , 2_0_0 )
__a = [floats_list((1, x) )[0] for x in lengths]
__a = ['longest', 'max_length', 'do_not_pad']
__a = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
__a = feat_extract(UpperCAmelCase , max_length=UpperCAmelCase , padding=UpperCAmelCase )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = np.random.rand(1_0_0 ).astype(np.floataa )
__a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = [np.asarray(UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
__a = feature_extractor(audio_target=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__a = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
__a = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test batched
__a = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_values
__a = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__a = np.asarray(UpperCAmelCase )
__a = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_values
__a = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = self.feature_extraction_class(**self.feat_extract_dict )
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCAmelCase ) == len(UpperCAmelCase ) for x, y in zip(UpperCAmelCase , processed_features[input_name] ) ) )
__a = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCAmelCase )
__a = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__a = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCAmelCase )
__a = self.feature_extraction_class(**self.feat_extract_dict )
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__a = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = self.feature_extraction_class(**self.feat_extract_dict )
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
__a = feat_extract.num_mel_bins # hack!
__a = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
__a = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = self.feat_extract_dict
__a = True
__a = self.feature_extraction_class(**UpperCAmelCase )
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = [len(UpperCAmelCase ) for x in speech_inputs]
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
__a = feat_extract.num_mel_bins # hack!
__a = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.feat_extract_dict
__a = True
__a = self.feature_extraction_class(**UpperCAmelCase )
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = [len(UpperCAmelCase ) for x in speech_inputs]
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
__a = min(UpperCAmelCase )
__a = feat_extract.num_mel_bins # hack!
__a = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> int:
from datasets import load_dataset
__a = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__a = ds.sort('id' ).select(range(UpperCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# fmt: off
__a = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
__a = self._load_datasamples(1 )
__a = SpeechTaFeatureExtractor()
__a = feature_extractor(UpperCAmelCase , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , UpperCAmelCase , atol=1e-6 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# fmt: off
__a = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
__a = self._load_datasamples(1 )
__a = SpeechTaFeatureExtractor()
__a = feature_extractor(audio_target=UpperCAmelCase , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase , atol=1e-4 ) )
| 246
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length ([min, max]): range to sample the target short-edge size from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
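# Usage sketch for ResizeShortestEdge above (added illustration): the 800/1333 sizes and the
# random uint8 image are assumptions for demonstration, not values from the original config.
if __name__ == "__main__":
    aug = ResizeShortestEdge([800, 800], max_size=1333)
    dummy = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
    resized = aug([dummy])[0]
    print(resized.shape)  # (800, 1067, 3): short edge scaled to 800, aspect ratio preserved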
| 654
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
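# Usage note (hedged): this module backs the `accelerate test` subcommand, so the typical
# entry points are the following (the config path is a placeholder for your own file):
#
#   accelerate test
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml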
| 567
|
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 567
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 372
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 372
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into a unit lower triangular matrix and an upper
    triangular matrix (Doolittle's method, no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Entries of L strictly below the diagonal.
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of U on and above the diagonal.
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
    import doctest

    doctest.testmod()
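    # Usage sketch (added illustration, not from the original source): an arbitrary 3x3
    # matrix for which the factorization exists without pivoting; checks L @ U == A.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(example)
    assert np.allclose(lower @ upper, example)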
| 167
| 0
|
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c over Pythagorean triplets (a, b, c)
    with a + b + c == n (Project Euler problem 9 style)."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    print(f"{solution() = }")
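    # Sanity check (standard result): for n = 1000 the triplet is (200, 375, 425),
    # since 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2.
    assert solution(1000) == 31_875_000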
| 593
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 593
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 714
|
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
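# Brief usage sketch for UMT5Config (added illustration, not part of the original module);
# the assertions follow directly from the defaults and the feed_forward_proj handling above.
if __name__ == "__main__":
    config = UMT5Config()
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == config.num_heads == 6
    assert config.dense_act_fn == "gelu_new"  # "gated-gelu" is mapped to the gelu_new kernel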
| 455
| 0
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def A ( UpperCamelCase_ : Dict ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , UpperCamelCase_ ).groups()[0]
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Any=None , __magic_name__ : List[Any]=None ):
"""simple docstring"""
lowerCAmelCase__ = file_names
lowerCAmelCase__ = image_transform
lowerCAmelCase__ = label_to_id
def __len__( self : Any ):
"""simple docstring"""
return len(self.file_names )
def __getitem__( self : List[Any] , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.file_names[idx]
lowerCAmelCase__ = PIL.Image.open(__magic_name__ )
lowerCAmelCase__ = raw_image.convert("RGB" )
if self.image_transform is not None:
lowerCAmelCase__ = self.image_transform(__magic_name__ )
lowerCAmelCase__ = extract_label(__magic_name__ )
if self.label_to_id is not None:
lowerCAmelCase__ = self.label_to_id[label]
return {"image": image, "label": label}
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
if args.with_tracking:
lowerCAmelCase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config["lr"]
lowerCAmelCase__ = int(config["num_epochs"] )
lowerCAmelCase__ = int(config["seed"] )
lowerCAmelCase__ = int(config["batch_size"] )
lowerCAmelCase__ = config["image_size"]
if not isinstance(UpperCamelCase_ , (list, tuple) ):
lowerCAmelCase__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
lowerCAmelCase__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCAmelCase__ = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
lowerCAmelCase__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCAmelCase__ = os.path.split(UpperCamelCase_ )[-1].split("." )[0]
accelerator.init_trackers(UpperCamelCase_ , UpperCamelCase_ )
# Grab all the image filenames
lowerCAmelCase__ = [os.path.join(args.data_dir , UpperCamelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
lowerCAmelCase__ = [extract_label(UpperCamelCase_ ) for fname in file_names]
lowerCAmelCase__ = list(set(UpperCamelCase_ ) )
id_to_label.sort()
lowerCAmelCase__ = {lbl: i for i, lbl in enumerate(UpperCamelCase_ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase_ )
torch.manual_seed(UpperCamelCase_ )
torch.cuda.manual_seed_all(UpperCamelCase_ )
# Split our filenames between train and validation
lowerCAmelCase__ = np.random.permutation(len(UpperCamelCase_ ) )
lowerCAmelCase__ = int(0.8 * len(UpperCamelCase_ ) )
lowerCAmelCase__ = random_perm[:cut]
lowerCAmelCase__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCAmelCase__ = Compose([RandomResizedCrop(UpperCamelCase_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCAmelCase__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# For evaluation, we use a deterministic Resize
lowerCAmelCase__ = Compose([Resize(UpperCamelCase_ ), ToTensor()] )
lowerCAmelCase__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
lowerCAmelCase__ = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = create_model("resnet50d" , pretrained=UpperCamelCase_ , num_classes=len(UpperCamelCase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCAmelCase__ = False
for param in model.get_classifier().parameters():
lowerCAmelCase__ = True
# We normalize the batches of images to be a bit faster.
lowerCAmelCase__ = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
lowerCAmelCase__ = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCAmelCase__ = OneCycleLR(optimizer=UpperCamelCase_ , max_lr=UpperCamelCase_ , epochs=UpperCamelCase_ , steps_per_epoch=len(UpperCamelCase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase__ = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase__ = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCAmelCase__ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCAmelCase__ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCAmelCase__ = os.path.splitext(UpperCamelCase_ )[0]
if "epoch" in training_difference:
lowerCAmelCase__ = int(training_difference.replace("epoch_" , "" ) ) + 1
lowerCAmelCase__ = None
else:
lowerCAmelCase__ = int(training_difference.replace("step_" , "" ) )
lowerCAmelCase__ = resume_step // len(UpperCamelCase_ )
resume_step -= starting_epoch * len(UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ , UpperCamelCase_ ):
model.train()
if args.with_tracking:
lowerCAmelCase__ = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCAmelCase__ = accelerator.skip_first_batches(UpperCamelCase_ , UpperCamelCase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCAmelCase__ = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCAmelCase__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCAmelCase__ = (batch["image"] - mean) / std
lowerCAmelCase__ = model(UpperCamelCase_ )
lowerCAmelCase__ = torch.nn.functional.cross_entropy(UpperCamelCase_ , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCAmelCase__ = os.path.join(args.output_dir , UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
model.eval()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCAmelCase__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCAmelCase__ = (batch["image"] - mean) / std
with torch.no_grad():
lowerCAmelCase__ = model(UpperCamelCase_ )
lowerCAmelCase__ = outputs.argmax(dim=-1 )
lowerCAmelCase__ ,lowerCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["label"]) )
lowerCAmelCase__ = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCAmelCase__ = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {1_00 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 1_00 * eval_metric,
"train_loss": total_loss.item() / len(UpperCamelCase_ ),
"epoch": epoch,
} , step=UpperCamelCase_ , )
if checkpointing_steps == "epoch":
lowerCAmelCase__ = F"""epoch_{epoch}"""
if args.output_dir is not None:
lowerCAmelCase__ = os.path.join(args.output_dir , UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
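# A hedged launch sketch for the script above; the data directory and checkpoint
# paths below are illustrative placeholders, not part of the original example:
#
#   accelerate launch cv_example.py --data_dir ./images \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking
#
# Resuming then points --resume_from_checkpoint at one of the saved folders,
# e.g. ./checkpoints/epoch_0, which the loop above parses to pick the restart point.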
| 48
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'''H\003'''
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 360
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
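# A hedged instantiation sketch (the kwargs mirror get_scheduler_config above):
#
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
#   scheduler._get_variance(487)  # the quantity checked in test_variance_fixed_small_log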
| 714
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
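# A hedged usage sketch for the extraction script above (the dump path is an
# illustrative placeholder, not from the original):
#
#   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform
#
# The saved state dict can then seed a 6-layer student model before distillation.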
| 576
| 0
|
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Finds the root component of a given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the new component throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller of two components to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''')
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'''The total weight of the minimal spanning tree is: {mst_weight}''')


def test_vector() -> None:
    pass
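# A minimal usage sketch (the graph below is chosen here for illustration, not
# taken from the original module): build a small weighted graph and run Borůvka.
#
#   g = Graph(4)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]:
#       g.add_edge(u, v, w)
#   g.boruvka()  # prints each added edge and a total MST weight of 6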
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Locates the first element in a sorted collection that is not smaller than item."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Locates the first element in a sorted collection that is strictly greater than item."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
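# A quick doctest-style check of the helpers above (values are illustrative):
#
#   >>> bisect_left([0, 5, 7, 10, 15], 6)
#   2
#   >>> bisect_right([0, 5, 7, 7, 15], 7)
#   4
#   >>> binary_search([0, 5, 7, 10, 15], 10)
#   3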
| 307
| 1
|
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 213
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 213
| 1
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, '''src''', '''transformers''')
DUMMY_CONSTANT = '''\n{0} = None\n'''
DUMMY_CLASS = '''\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'''
DUMMY_FUNCTION = '''\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'''
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend('    if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')

        backend_with_underscore = find_backend('    if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')

        double_backend = find_backend('    if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')

        double_backend_with_underscore = find_backend(
            '    if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')

        triple_backend = find_backend(
            '    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 538
|
def sylvester(number: int) -> int:
    """
    Returns the number-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ...
    """
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
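# Sanity check on the first few terms (2, 3, 7, 43, ...): each term is the
# previous term squared minus the previous term plus one, so sylvester(4) == 43.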
| 217
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'''source''': '''What is love ?''', '''target''': '''life'''}
        n_lines = {'''train''': 12, '''val''': 2, '''test''': 2}

        # Actually create the dataset
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '''\n'''.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f'''{split}.{field}'''), '''w''') as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, '''output''')
        data_dir = os.path.join(tmp_dir, '''data''')
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
        if gpus > 0:
            testargs.append(f'''--gpus={gpus}''')
            if is_apex_available():
                testargs.append('''--fp16''')
        else:
            testargs.append('''--gpus=0''')
            testargs.append('''--distributed_backend=ddp_cpu''')
            testargs.append('''--num_processes=2''')

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, '''metrics.json''')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='''ray''')
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='''ray''')
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''], 0.2)
| 606
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''decord''')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('''http://''') or video.startswith('''https://'''):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 606
| 1
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
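# A hedged instantiation sketch mirroring the config used by the tests above:
#
#   scheduler = DPMSolverSDEScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   scheduler.timesteps  # the 10 inference timesteps the loops above iterate over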
| 601
|
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which a carry past position k occurs, where terms are written as
    a(i) = b * 10^k + c. Returns the total difference added and the number of
    terms jumped over.
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(new_c, k, a_i)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Sequentially computes terms of the sequence until either the n-th term is
    reached or a carry propagates past digit position k.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)

            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(addend, k, a_i)
    return diff, i - start_i


def add(addend, k, digits):
    """
    Adds addend to the digit array given in digits, starting at index k.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence 1, 2, 4, 8, 16, 23, ... in which each
    term is the previous term plus the sum of its digits.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 627
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = """dinat"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
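# A hedged instantiation sketch (kwargs mirror the signature above):
#
#   config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size  # 512 == 64 * 2 ** 3, the channel dim after the last stage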
| 691
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 691
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 279
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_torch_available""", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch_available):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 279
| 1
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
__lowerCAmelCase : Tuple =mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
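# A hedged usage sketch (the file names are placeholders, as in the demo above):
#
#   from PIL import Image
#   img = mean_threshold(Image.open("photo.png").convert("L"))
#   img.save("binarized.png")  # every pixel is now either 0 or 255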
| 701
|
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """
    Returns True if n is pentagonal, False otherwise.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
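# Quick sanity check using the pentagonal formula P(n) = n(3n - 1)/2:
#
#   is_pentagonal(92)  # True, since P(8) == 92
#   is_pentagonal(91)  # False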
| 197
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 405
|
"""An interactive bullet-point menu for selecting a choice on the command line."""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called; moves the cursor either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
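# Minimal usage sketch (illustrative only -- the prompt and choices below are
# made up, not part of the original module):
#
#   menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow"])
#   chosen_index = menu.run(default_choice=0)
#
# `run` renders the choices, lets the user navigate with arrow or number keys,
# and returns the index of the entry confirmed with enter.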
| 405
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight as K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
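# Shape sanity sketch for the split above (dimensions are illustrative, not
# taken from a real OPT checkpoint): a fused qkv weight of shape
# (3 * hidden, hidden) yields three (hidden, hidden) tensors.
#
#   fused = torch.zeros(24, 8)
#   k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
#   assert q.shape == (8, 8)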
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 679
|
from __future__ import annotations


def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return (numerator, denominator) of `decimal` reduced to lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm for the greatest common divisor
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
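# Cross-check sketch against the standard library (assuming the same reduction
# to lowest terms):
#
#   from fractions import Fraction
#   Fraction("6.25")  # Fraction(25, 4), matching decimal_to_fraction("6.25")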
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__lowercase : str ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowercase : Dict ={
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
__lowercase : Optional[int] ={
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
__lowercase : int ={
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
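# Usage sketch (assumes network access to the Hugging Face Hub):
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   encoded = tokenizer("Hello world")
#   # encoded["input_ids"] begins with the [CLS] id and ends with the [SEP] id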
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random element of ``lst`` as the pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number of ``lst`` (1-indexed)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
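# Usage sketch (illustrative values; this implementation assumes distinct
# elements, since items equal to the pivot are discarded during partitioning):
#
#   kth_number([7, 10, 4, 3, 20, 15], 3)  # -> 7, the 3rd smallest element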
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
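# Usage sketch (illustrative; requires the vision extra and downloads the
# CLIPSeg checkpoint on first use):
#
#   tool = ImageSegmentationTool()
#   mask = tool(image=some_pil_image, label="cat")  # binary PIL mask of the label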
| 494
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
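# Behavior sketch for make_batched (assuming PIL images as frames):
#
#   make_batched(frame)        # -> [[frame]]     a single image
#   make_batched([f0, f1])     # -> [[f0, f1]]    one video's frames
#   make_batched([[f0, f1]])   # -> [[f0, f1]]    already a batch of videos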
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 58
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
# NOTE: each tensor below is meant to be stored under a per-model key, e.g.
# results["<model_id_with_underscores>"] = torch.tensor([...]); the keys are
# missing from this copy, so the lookup in the check loop below needs them
# restored before the script can run end to end.
lowercase__ :Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowercase__ :Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowercase__ :Optional[Any] = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowercase__ :List[Any] = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowercase__ :List[Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowercase__ :Optional[int] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowercase__ :Optional[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowercase__ :List[str] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowercase__ :str = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowercase__ :Union[str, Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowercase__ :List[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowercase__ :Optional[Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowercase__ :Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowercase__ :int = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowercase__ :List[str] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"""Started running {mod.modelId}!!!""")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
| 522
| 0
|
"""Strand sort: repeatedly extract increasing sublists and merge them."""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 709
|
"""Iterative depth-first search (DFS) on a graph given as an adjacency dict."""
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Return the set of all vertices reachable from ``start``."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 670
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
__SCREAMING_SNAKE_CASE : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
__SCREAMING_SNAKE_CASE : Tuple = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 670
| 1
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 702
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: return the least fill length n for which the
    fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
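# For reference, the published answer to Project Euler problem 115 with
# min_block_length = 50 is 168.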
| 527
| 0
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68
|
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test (5 rounds)."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Quick primality check against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime of roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 225
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
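# Hedged usage example: instantiating the config with defaults and one override.
# Attribute names follow the constructor above.
#
# config = Speech2Text2Config(decoder_layers=4)
# print(config.d_model, config.decoder_layers)  # 256 4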
| 719
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
UpperCamelCase = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> list:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
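# Hedged usage sketch: loading the published checkpoint exercises the BPE logic above
# without needing local vocab/merges files.
#
# tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# ids = tok("hello there, general kenobi")["input_ids"]
# print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))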
| 383
| 0
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
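# Hedged example of the special-token helpers above, using toy token ids:
# a single sequence [5, 6] becomes [cls, 5, 6, sep], and the corresponding
# token-type ids are all zeros.
#
# tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# ids = tok.build_inputs_with_special_tokens([5, 6])
# assert tok.create_token_type_ids_from_sequences([5, 6]) == [0] * len(ids)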
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
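# The framework-agnostic helpers under test dispatch on the input's type. A minimal
# sketch of that idea (an illustration, not the transformers implementation):
#
# def transpose_like(x, axes=None):
#     if isinstance(x, np.ndarray):
#         return np.transpose(x, axes=axes)
#     # torch calls the same concept `permute`
#     return x.permute(*axes) if axes is not None else x.T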
| 0
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-style state dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
snake_case = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
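# Hedged CLI example (all paths are placeholders):
#
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output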
| 710
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
snake_case = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
snake_case = ap.parse_args()
snake_case = Path(args.readme_filepath)
snake_case = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
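# Hedged round-trip example: build metadata in memory and render the YAML block
# that would be prepended to a README.md.
#
# meta = DatasetMetadata(license=["mit"], language=["en"])
# print("---\n" + meta.to_yaml_string() + "---")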
| 535
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
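# Hedged usage sketch (model weights are downloaded on first use; the dialogue text
# is a made-up example):
#
# tool = TextSummarizationTool()
# print(tool("Philipp: Hey, can you summarize our chat? Anna: Sure, one moment."))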
| 87
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
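# Hedged sketch of the denoising loop the full-loop tests exercise, outside the
# test harness (`model` is a stand-in for a trained noise-prediction network):
#
# import torch
# from diffusers import DDPMScheduler
#
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# sample = torch.randn(1, 3, 32, 32)
# for t in reversed(range(len(scheduler))):
#     residual = model(sample, t)  # assumed trained UNet
#     sample = scheduler.step(residual, t, sample).prev_sample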
| 346
| 0
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 157
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
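# Hedged check: 144 is the twelfth Fibonacci term and the first with three digits,
# so `solution(3)` should return 12.
#
# assert solution(3) == 12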
| 157
| 1
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert isinstance(x, Vector) and isinstance(y, Vector) and (isinstance(scalar, (int, float)))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # note: the original seeds the RNG with one of the integer arguments
    random.seed(n)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # note: the original seeds the RNG with one of the integer arguments
    random.seed(width)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
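# Hedged usage example of the classes above:
#
# v = Vector([1, 2, 3])
# w = Vector([4, 5, 6])
# print(v + w)               # (5,7,9)
# print(v * w)               # 32 (dot product)
# m = Matrix([[1, 0], [0, 1]], 2, 2)
# print(m * Vector([3, 4]))  # (3,4)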
| 443
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowercase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_lowercase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_lowercase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
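# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file). `datasets.load_metric`
# fetches the canonical "rouge" script, equivalent to the class above; with
# use_aggregator=False the metric returns one Score tuple per example instead
# of bootstrap AggregateScores.
if __name__ == "__main__":
    rouge = datasets.load_metric("rouge")
    results = rouge.compute(
        predictions=["hello there"], references=["hello there"], use_aggregator=False
    )
    print(results["rouge1"])  # [Score(precision=1.0, recall=1.0, fmeasure=1.0)]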
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Compute the rank of a matrix in place via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
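# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file). The classic singular
# 3x3 matrix has linearly dependent rows, so its rank is 2 rather than 3.
def _demo_rank_of_matrix() -> None:
    assert rank_of_matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == 2


if __name__ == "__main__":
    _demo_rank_of_matrix()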
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered combinations of elements of `array` that sum to
    `target` (naive exponential recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoization over the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, built bottom-up in O(n * target) time."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
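# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file). All three
# implementations count ordered combinations and should agree; for
# array=[1, 2, 5] and target=5 the bottom-up table is [1, 1, 2, 3, 5, 9].
def _demo_combination_sum_iv() -> None:
    assert combination_sum_iv(3, [1, 2, 5], 5) == 9
    assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
    assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9


if __name__ == "__main__":
    _demo_combination_sum_iv()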
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluate the functional correctness of a completion by running it in a
    separate process with a hard timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive functions so the executed program cannot interfere
    with the test (fork bombs, killing processes, deleting files, ...).

    WARNING: this is NOT a security sandbox; untrusted code should still only
    be run inside one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
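# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file). check_correctness runs
# an untrusted program in a subprocess with stdio swallowed, a wall-clock
# limit, and destructive os/shutil/subprocess calls disabled in the child.
if __name__ == "__main__":
    demo_program = "assert sum([1, 2, 3]) == 6\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))
    # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}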
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=20 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.eos_token_id # Eos Token
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase = self.get_config()
UpperCamelCase = prepare_mam_aaa_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = MaMaaaModel(config=SCREAMING_SNAKE_CASE ).get_decoder().to(SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase = inputs_dict["input_ids"]
UpperCamelCase = inputs_dict["attention_mask"]
UpperCamelCase = inputs_dict["head_mask"]
# first forward pass
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )["last_hidden_state"]
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE )[
"last_hidden_state"
]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-2 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = MaMaaaModel(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase = model(**SCREAMING_SNAKE_CASE )
UpperCamelCase = outputs.encoder_last_hidden_state
UpperCamelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaEncoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaDecoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = MaMaaaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = model_class.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
self.assertEqual(info["missing_keys"] , [] )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not self.is_encoder_decoder:
UpperCamelCase = inputs["input_ids"]
del inputs["input_ids"]
else:
UpperCamelCase = inputs["input_ids"]
UpperCamelCase = inputs.get("decoder_input_ids" , SCREAMING_SNAKE_CASE )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , SCREAMING_SNAKE_CASE )
UpperCamelCase = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase = wte(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase = wte(SCREAMING_SNAKE_CASE )
UpperCamelCase = wte(SCREAMING_SNAKE_CASE )
with torch.no_grad():
model(**SCREAMING_SNAKE_CASE )[0]
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
UpperCamelCase = input_dict["input_ids"]
UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval().to(SCREAMING_SNAKE_CASE )
if torch_device == "cuda":
model.half()
model.generate(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
UpperCamelCase = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCamelCase = model(**SCREAMING_SNAKE_CASE )[0]
UpperCamelCase = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# change to expected output here
UpperCamelCase = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(SCREAMING_SNAKE_CASE )
# change to intended input
UpperCamelCase = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
UpperCamelCase = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCamelCase = model(**SCREAMING_SNAKE_CASE )[0]
UpperCamelCase = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# change to expected output here
UpperCamelCase = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
UpperCamelCase = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="pt" )
UpperCamelCase = model.generate(
input_ids=dct["input_ids"].to(SCREAMING_SNAKE_CASE ) , attention_mask=dct["attention_mask"].to(SCREAMING_SNAKE_CASE ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
UpperCamelCase = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
UpperCamelCase = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
assert generated == expected_en
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # Load the specified checkpoint, if any.
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
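# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file). The checkpoint paths
# are the defaults assumed above, and taming-transformers must be installed.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)  # falls back to ./model_checkpoints/vqgan_only.{yaml,pt}
    dummy = torch.randn(1, 3, 256, 256, device=device)
    reconstruction = reconstruct_with_vqgan(dummy, vqgan)
    print(reconstruction.shape)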
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Blenderbot Small does not use token type ids, so return a mask of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
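# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file; downloads the public
# facebook/blenderbot_small-90M vocabulary on first use).
if __name__ == "__main__":
    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    ids = tokenizer("sam has a red apple")["input_ids"]
    print(tokenizer.decode(ids))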
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
"""simple docstring"""
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Any=2, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=2_0, _UpperCAmelCase : int=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[str]=0, ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Dict = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : str = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : Tuple = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
"""simple docstring"""
    src_text = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
    model_name = "facebook/blenderbot_small-90M"
@cached_property
    def tokenizer(self):
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
    def model(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
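# ---------------------------------------------------------------------------
# Alternative sketch (added; not part of the original file). Every third
# Fibonacci number is even, and the even terms satisfy
# E(k) = 4*E(k-1) + E(k-2) with E(1) = 2 and E(2) = 8, so the sum can be
# accumulated without materializing the full sequence.
def solution_even_terms(n: int = 4_000_000) -> int:
    total = 0
    a, b = 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total


if __name__ == "__main__":
    assert solution_even_terms() == solution() == 4_613_732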
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
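# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original file). The defaults resize the
# short side to int(256 / 224 * 224) = 256 and then center-crop to 224x224.
if __name__ == "__main__":
    processor = LevitImageProcessor()
    image = (np.random.rand(300, 400, 3) * 255).astype("uint8")
    batch = processor(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)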
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def A_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
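    # Illustrative check of pV = nRT (values are arbitrary, not from the original
    # file): 1 mol at 300 K in 1 m^3 gives p = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa.
    print(pressure_of_gas_system(1.0, 300.0, 1.0))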
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : Any = 'open-llama'
def __init__( self , snake_case=100_000 , snake_case=4_096 , snake_case=11_008 , snake_case=32 , snake_case=32 , snake_case="silu" , snake_case=2_048 , snake_case=0.02 , snake_case=1e-6 , snake_case=True , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=False , snake_case=True , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=True , snake_case=None , **snake_case , ):
"""simple docstring"""
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : str = initializer_range
lowerCAmelCase__ : str = rms_norm_eps
lowerCAmelCase__ : int = use_cache
lowerCAmelCase__ : str = kwargs.pop(
"use_memorry_efficient_attention" , snake_case )
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_dropout_prob
lowerCAmelCase__ : int = use_stable_embedding
lowerCAmelCase__ : List[Any] = shared_input_output_embedding
lowerCAmelCase__ : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , tie_word_embeddings=snake_case , **snake_case , )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
lowerCAmelCase__ : str = self.rope_scaling.get("type" , snake_case )
lowerCAmelCase__ : int = self.rope_scaling.get("factor" , snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(snake_case , snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
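# Usage sketch (illustrative, not part of the original file): the validation runs
# automatically inside __init__, so a malformed dict raises immediately.
#
# cfg = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
# cfg = OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 0.5})   # raises ValueError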
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class __a ( __magic_name__ ):
"""simple docstring"""
def __init__( self , *snake_case , **snake_case ):
"""simple docstring"""
super().__init__(*snake_case , **snake_case )
self.check_model_type(snake_case )
def SCREAMING_SNAKE_CASE_ ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = {}, {}
if padding is not None:
lowerCAmelCase__ : Dict = padding
if truncation is not None:
lowerCAmelCase__ : Tuple = truncation
if top_k is not None:
lowerCAmelCase__ : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case , snake_case = None , **snake_case ):
"""simple docstring"""
if isinstance(snake_case , (Image.Image, str) ) and isinstance(snake_case , snake_case ):
lowerCAmelCase__ : int = {"image": image, "question": question}
else:
lowerCAmelCase__ : Optional[int] = image
lowerCAmelCase__ : Optional[Any] = super().__call__(snake_case , **snake_case )
return results
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case=False , snake_case=False ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = load_image(inputs["image"] )
lowerCAmelCase__ : str = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=snake_case , truncation=snake_case )
lowerCAmelCase__ : int = self.image_processor(images=snake_case , return_tensors=self.framework )
model_inputs.update(snake_case )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.model(**snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowerCAmelCase__ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase__ : Optional[int] = model_outputs.logits.sigmoid()[0]
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = probs.topk(snake_case )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCAmelCase__ : int = scores.tolist()
lowerCAmelCase__ : str = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case , snake_case )]
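# Usage sketch (hedged — the checkpoint below is only an example, nothing in this
# file pins a model):
#
# from transformers import pipeline
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="path/to/image.png", question="How many cats are there?")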
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check if the credit card number starts with a valid prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Check if the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_way = len(cc_number) - 2
    for i in range(half_way, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print validation messages and return whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
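# Worked examples (illustrative): decimal_to_any(255, 16) == "FF" since 255 = 15*16 + 15
# and 15 maps to "F"; decimal_to_any(5, 2) == "101".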
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple computed via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
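# Quick sanity check (illustrative): solution(10) == 2520, the worked example given
# in Project Euler problem 5.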
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2_048
MAX_LENGTH = 4_096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
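# Illustrative return value (hypothetical record, field values invented for shape only):
# {"id": "...", "category": ["short"], "start_token": [14], "end_token": [16],
#  "start_byte": [...], "end_byte": [...], "text": [...], "remove_it": False}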
def get_context_and_ans(example, assertion=False):
    """Give new context after removing html tags."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
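# Illustrative invocation (hedged): running e.g. `pytest --make-reports=my_run tests/`
# routes the terminal summary through the shared diffusers helper, which writes
# report files named after the given id.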
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""")
def snake_case_ ( self):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def snake_case_ ( self):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""")
def snake_case_ ( self):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""")
def snake_case_ ( self):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def snake_case_ ( self):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""")
def snake_case_ ( self):
pass
@unittest.skip("""Safetensors is not supported by timm.""")
def snake_case_ ( self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def snake_case_ ( self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__magic_name__ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _lowerCAmelCase ( UpperCamelCase_ = "mumbai" ):
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
__SCREAMING_SNAKE_CASE = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
__SCREAMING_SNAKE_CASE = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
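# Each yielded item is a (job_title, company_name) pair, e.g.
# ("Android Developer", "ACME Corp") — illustrative values, dependent on live page markup.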
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
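# Illustrative behaviour: PhrasalConstraint([5, 9, 1]) is only fulfilled once the
# tokens 5, 9, 1 have been generated contiguously — each matching update() advances
# fulfilled_idx by one, and any mismatch resets progress to the start of the phrase.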
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        """A trie over the token sequences in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list
    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
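# Usage sketch (hedged — the decorated function below is hypothetical): the factory
# returns a decorator, so a benchmarked callable is wrapped like
#
# @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
# def _forward():
#     return model(input_ids, training=False)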
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 367
| 0
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
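# Quick numeric sketch (editorial addition): `jax_cosine_distance` returns pairwise
# cosine similarities, so inputs of shape (4, 8) and (3, 8) yield a (4, 3) matrix
# with entries in [-1, 1]. The shapes below are arbitrary illustration.
if __name__ == "__main__":
    a = jax.random.normal(jax.random.PRNGKey(0), (4, 8))
    b = jax.random.normal(jax.random.PRNGKey(1), (3, 8))
    sims = jax_cosine_distance(a, b)
    print(sims.shape, float(sims.max()))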
| 179
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 585
| 0
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
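# Editorial note: for binary labels the score computed above reduces to the
# closed form MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
# Sanity check: references=[1, 1, 0, 0], predictions=[1, 0, 0, 0] gives
# (1*2 - 0*1) / sqrt(1 * 2 * 2 * 3) ≈ 0.577.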
| 255
|
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
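# Editorial note: the heuristic above is the Manhattan distance to the goal;
# e.g. a node at (0, 0) with goal (6, 6) gets f_cost = |0 - 6| + |0 - 6| = 12.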
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
UpperCAmelCase =(0, 0)
UpperCAmelCase =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
UpperCAmelCase =GreedyBestFirst(init, goal)
UpperCAmelCase =greedy_bf.search()
if path:
for pos_x, pos_y in path:
UpperCAmelCase =2
for elem in grid:
print(elem)
| 255
| 1
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def A ( __UpperCamelCase , __UpperCamelCase="train" ) -> str:
return calculate_hypothesis_value(__UpperCamelCase , __UpperCamelCase ) - output(
__UpperCamelCase , __UpperCamelCase )
def A ( __UpperCamelCase ) -> int:
A__ = 0
for i in range(len(__UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
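# Worked example (editorial): with parameter_vector = [2, 4, 1, 5], the input
# (5, 2, 3) yields 2 + 4*5 + 1*2 + 5*3 = 39.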
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 9
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 223
| 0
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
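# Examples (editorial): 13 needs 2 squares (4 + 9), while 12 needs 3
# (4 + 4 + 4), since no two perfect squares sum to 12.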
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected height and width when resizing with a scalar size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 693
| 0
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noisy_samples = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noisy_samples + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
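# Editorial sketch of how the predictor-corrector loop above is typically driven
# (names follow the methods defined in this file; `score` is a stand-in network):
#
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(score(sample, t), sample).prev_sample
#         out = scheduler.step_pred(score(sample, t), t, sample)
#         sample, sample_mean = out.prev_sample, out.prev_sample_mean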
| 56
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Runs doctests for all files in a directory, with optional include/exclude filters."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_doctests(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 395
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
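# Example of the size rounding in `resize` above (editorial): with size_divisor=32,
# a 500x353 image becomes 480x352, since 500 // 32 * 32 == 480 and 353 // 32 * 32 == 352.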
| 318
|
'''simple docstring'''
_lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
_lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
lowercase__ = F'''Expected string as input, found {type(lowercase ).__name__}'''
raise TypeError(lowercase )
lowercase__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(lowercase ) != 9:
raise ValueError(lowercase )
try:
lowercase__ = int(spanish_id_clean[0:8] )
lowercase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase ) from ex
if letter.isdigit():
raise ValueError(lowercase )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
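# Example (editorial): is_spain_national_id("12345678Z") -> True, since
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".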
| 318
| 1
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclidean algorithm: returns (x, y) with a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Find x such that x % n1 == r1 and x % n2 == r2 (n1 and n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 650
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Improved pseudo linear multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps used for the diffusion chain (to be run before inference)."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Propagate the sample one step backwards through the reverse ODE."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep: fall back to lower orders until enough history has accumulated.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """The iPNDM scheduler does not rescale model inputs."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
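# Minimal usage sketch (assumes a denoising `model` with the usual diffusers calling convention):
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)  # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample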
| 80
| 0
|
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish activation: x * sigmoid(x)
    return vector * sigmoid(vector)
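# Quick check: sigmoid(np.array([0.0])) -> array([0.5]) and sigmoid_linear_unit(np.array([0.0])) -> array([0.0]).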
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
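# With the binary heap above, Prim's algorithm runs in O((V + E) log V) for V vertices and E edges.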
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 313
| 0
|
"""Convert a consistency-models checkpoint to a diffusers ConsistencyModelPipeline."""

import argparse
import os

import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def str2bool(v):
    """Parse a boolean-ish CLI string ("yes", "false", "1", ...)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map an OpenAI-style ResBlock onto the diffusers ResnetBlock2D key layout
    # (diffusers-side key names follow the upstream conversion script).
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
__magic_name__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
__magic_name__ : Optional[Any] = parser.parse_args()
__magic_name__ : Dict = strabool(args.class_cond)
__magic_name__ : Optional[Any] = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
__magic_name__ : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__magic_name__ : List[Any] = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
__magic_name__ : Optional[Any] = None
__magic_name__ : Any = con_pt_to_diffuser(args.unet_path, unet_config)
__magic_name__ : int = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__magic_name__ : List[str] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__magic_name__ : Union[str, Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ : Optional[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
__magic_name__ : Dict = CMStochasticIterativeScheduler(**scheduler_config)
__magic_name__ : Tuple = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
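# Hedged CLI sketch (the checkpoint file name is hypothetical; it just needs to match the
# substring-based config selection above, e.g. "cd" + "imagenet64"):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./cm-dump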
| 672
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
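# Note: unlike most text models, Perceiver feeds its embedded inputs through the key "inputs"
# (not "input_ids"), hence the pop/rename in `generate_dummy_inputs` above.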
| 146
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
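# Design note: the second adapter linear is zero-initialized, so the adapter contributes
# nothing at step 0 and training starts exactly from the frozen base model's behavior.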
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # Serializing 4-bit models was not supported at the time this test was written.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 702
|
"""Feature extractor class for CLAP."""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk"
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
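    # Fusion stacks four mel "channels": one globally shrunk copy of the whole clip plus random
    # front/middle/back crops, so the model sees both global structure and local detail.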
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
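# Minimal usage sketch (one second of silence at the default 48 kHz rate; values are illustrative):
#   extractor = ClapFeatureExtractor()
#   features = extractor(np.zeros(48_000), sampling_rate=48_000, return_tensors="np")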
| 4
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 641
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=sequences , )
| 7
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
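# Illustrative sketch (not part of the original test file): how the toy merge
# rules defined in setUp turn "lower" into ["low", "er</w>"]. This is a
# simplified BPE that applies merges in priority order; the trailing "</w>"
# marks the end of a word, exactly as in the vocab above.
def _toy_bpe(word: str, merges: list) -> list:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for left, right in merges:
        i = 0
        while i < len(symbols) - 1:
            if (symbols[i], symbols[i + 1]) == (left, right):
                symbols[i : i + 2] = [left + right]  # merge the adjacent pair
            else:
                i += 1
    return symbols


# ("l","o") -> "lo", then ("lo","w") -> "low", then ("e","r</w>") -> "er</w>"
assert _toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]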
| 547
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Graph whose edges carry transition probabilities, used to run a
    random walk (Markov chain) over its nodes.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
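# Illustrative usage (values invented for this example): a biased two-state
# chain. Each node's outgoing probabilities sum to 1, and the visit counts
# returned by get_transitions approximate the chain's stationary behaviour.
def _example_walk() -> None:
    transitions = [
        ("a", "a", 0.3),
        ("a", "b", 0.7),
        ("b", "a", 0.6),
        ("b", "b", 0.4),
    ]
    counts = get_transitions("a", transitions, 5_000)
    # "a" hands off to "b" with probability 0.7, so "b" dominates the counts.
    print(counts.most_common())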
| 547
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _A :
def __init__( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str=13 , __magic_name__ : Tuple=10 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[Any]=2 , __magic_name__ : str=2 , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[int]=32 , __magic_name__ : str=5 , __magic_name__ : Tuple=4 , __magic_name__ : Tuple=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Tuple=10 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : Optional[Any]=0.9 , __magic_name__ : str=None , ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = image_size
__snake_case : Union[str, Any] = num_channels
__snake_case : int = patch_size
__snake_case : Dict = tubelet_size
__snake_case : Union[str, Any] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : Dict = use_labels
__snake_case : Tuple = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : List[str] = type_sequence_label_size
__snake_case : List[str] = initializer_range
__snake_case : Optional[Any] = mask_ratio
__snake_case : int = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__snake_case : Tuple = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__snake_case : Optional[Any] = int(mask_ratio * self.seq_length )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Tuple ) -> int:
"""simple docstring"""
__snake_case : Tuple = VideoMAEModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Any ) -> int:
"""simple docstring"""
__snake_case : Dict = VideoMAEForPreTraining(__magic_name__ )
model.to(__magic_name__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__snake_case : List[str] = torch.ones((self.num_masks,) )
__snake_case : Tuple = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__snake_case : Any = mask.expand(self.batch_size , -1 ).bool()
__snake_case : Union[str, Any] = model(__magic_name__ , __magic_name__ )
# model only returns predictions for masked patches
__snake_case : List[Any] = mask.sum().item()
__snake_case : Any = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Union[str, Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowercase__: Any = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowercase__: int = False
lowercase__: Optional[Any] = False
lowercase__: int = False
lowercase__: List[Any] = False
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = VideoMAEModelTester(self )
__snake_case : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str=False ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__snake_case : str = torch.ones((self.model_tester.num_masks,) )
__snake_case : str = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__snake_case : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
__snake_case : str = bool_masked_pos.to(__magic_name__ )
if return_labels:
if model_class in [
*get_values(__magic_name__ ),
]:
__snake_case : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
@slow
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = VideoMAEModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
for model_class in self.all_model_classes:
__snake_case : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
__snake_case : Union[str, Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__snake_case : str = True
__snake_case : int = False
__snake_case : List[str] = True
__snake_case : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : str = True
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Optional[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Optional[Any] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__snake_case : Tuple = len(__magic_name__ )
# Check attention is always last and order is fine
__snake_case : Dict = True
__snake_case : Optional[int] = True
__snake_case : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
__snake_case : List[str] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ):
__snake_case : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.hidden_states
__snake_case : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : Any = self.model_tester.seq_length - self.model_tester.num_masks
__snake_case : List[str] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[int] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
pass
def _a ( ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__snake_case : Union[str, Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
__magic_name__ )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : List[Any] = prepare_video()
__snake_case : Union[str, Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# verify the logits
__snake_case : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : str = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__magic_name__ )
__snake_case : List[Any] = self.default_image_processor
__snake_case : List[str] = prepare_video()
__snake_case : Union[str, Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# add boolean mask, indicating which patches to mask
__snake_case : str = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__snake_case : Tuple = torch.load(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : str = model(**__magic_name__ )
# verify the logits
__snake_case : Tuple = torch.Size([1, 14_08, 15_36] )
__snake_case : Tuple = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=__magic_name__ )
self.assertEqual(outputs.logits.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__snake_case : int = torch.tensor([0.5142] , device=__magic_name__ )
self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__snake_case : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__magic_name__ ).to(
__magic_name__ )
with torch.no_grad():
__snake_case : Optional[Any] = model(**__magic_name__ )
__snake_case : int = torch.tensor([0.6469] , device=__magic_name__ )
self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1E-4 ) )
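# Standalone sketch (added illustration, not part of the original tests) of
# the bool_masked_pos construction used in the pre-training tests above: one
# shared mask of num_masks ones is padded to seq_length, then repeated for
# every example in the batch so each video masks the same number of patches.
def _example_bool_masked_pos() -> None:
    seq_length, num_masks, batch_size = 8, 3, 2
    mask = torch.ones((num_masks,))
    mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
    bool_masked_pos = mask.expand(batch_size, -1).bool()
    assert bool_masked_pos.shape == (batch_size, seq_length)
    assert bool_masked_pos.sum().item() == batch_size * num_masks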
| 26
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the decorated function as the handler for the given key code.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the decorated function as the handler for several key codes.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects methods marked with `mark`/`mark_multiple`
    into a per-class `key_handler` dispatch table.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
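# Illustrative usage (hypothetical menu class; available key names depend on
# the KEYMAP contents): `mark` tags a method with a key code, `register`
# rebuilds the class under the KeyHandler metaclass, and `handle_input` then
# dispatches the next keypress to the tagged method.
@register
class _ExampleMenu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "moved up"


# _ExampleMenu.key_handler now maps KEYMAP["up"] to move_up;
# _ExampleMenu.handle_input(_ExampleMenu) would block on get_character().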
| 10
| 0
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
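# Quick extra illustration (assumes, as the test above does, that
# kruskal(num_nodes, edges) takes [node1, node2, weight] triples and returns
# the MST edges in the same form): a triangle keeps its two lightest edges.
def test_kruskal_triangle() -> None:
    result = kruskal(3, [[0, 1, 1], [1, 2, 2], [0, 2, 3]])
    assert sorted(result) == [[0, 1, 1], [1, 2, 2]]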
| 677
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial term by term: the sum of c * x**i."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule (n multiplications)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
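    # Sanity check (added illustration): Horner's rule is the nested form
    # a0 + x*(a1 + x*(a2 + ...)), so both evaluators must agree.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9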
| 677
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
@property
def __a ( self : Optional[Any] ):
torch.manual_seed(0 )
A = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __a ( self : Dict ):
A = self.dummy_uncond_unet
A = KarrasVeScheduler()
A = KarrasVePipeline(unet=_lowercase , scheduler=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = torch.manual_seed(0 )
A = pipe(num_inference_steps=2 , generator=_lowercase , output_type='numpy' ).images
A = torch.manual_seed(0 )
A = pipe(num_inference_steps=2 , generator=_lowercase , output_type='numpy' , return_dict=_lowercase )[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A = 'google/ncsnpp-celebahq-256'
A = UNetaDModel.from_pretrained(_lowercase )
A = KarrasVeScheduler()
A = KarrasVePipeline(unet=_lowercase , scheduler=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = torch.manual_seed(0 )
A = pipe(num_inference_steps=20 , generator=_lowercase , output_type='numpy' ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 690
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape an Amazon search results page for the given product and collect
    title, link, price, rating, MRP and discount into a DataFrame.
    """
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('nan' )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ' '
        product_mrp = ' '
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 690
| 1
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """
    Apply the Koch snowflake iteration step the given number of times.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """
    Replace every line segment with four segments forming the Koch motif.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """
    Rotate a 2D vector counterclockwise by the given angle (in degrees).
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """
    Plot the snowflake outline described by the vector list.
    """
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 721
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__UpperCamelCase : Dict = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """
    Normalize a single image, a single video (list of frames) or a batch of
    videos into a batch: a list of videos, each a list of frames.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"""Could not make batched video from {videos}""")
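# Illustration (added sketch with dummy data): the three accepted layouts
# are normalized to list-of-videos-of-frames.
def _make_batched_example() -> None:
    frame = np.zeros((8, 8, 3), dtype=np.uint8)  # a dummy RGB frame
    assert len(make_batched(frame)) == 1               # image  -> [[image]]
    assert len(make_batched([frame, frame])[0]) == 2   # video  -> [video]
    assert len(make_batched([[frame], [frame]])) == 2  # batch  -> unchanged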
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 270
| 0
|
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Return the most likely sequence of hidden states for the given
    observation sequence (Viterbi algorithm).
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError('There\'s an empty parameter')


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, 'observations_space')
    _validate_list(states_space, 'states_space')


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f'{var_name} must be a list')
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f'{var_name} must be a list of strings')


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, 'initial_probabilities', float)
    _validate_nested_dict(transition_probabilities, 'transition_probabilities')
    _validate_nested_dict(emission_probabilities, 'emission_probabilities')


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f'{var_name} must be a dict')
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f'{var_name} all keys must be strings')
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = 'nested dictionary ' if nested else ''
        raise ValueError(f'{var_name} {nested_text}all values must be {value_type.__name__}')


if __name__ == "__main__":
    from doctest import testmod

    testmod()
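    # Worked example (illustrative values; the classic healthy/fever HMM):
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Most likely hidden state sequence: ['Healthy', 'Healthy', 'Fever']
    print(viterbi(observations, states, start_p, trans_p, emit_p))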
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
np.random.seed(seed )
return np.random.rand(*shape )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
        __snake_case = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        __snake_case = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.01 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.01 ) )
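# A minimal, self-contained sketch of the offset-to-timestamp conversion the
# integration test above relies on. The 320-sample hop and 16 kHz rate are
# assumptions matching the base Wav2Vec2 architecture, not values read from a
# checkpoint.
def offsets_to_seconds(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": round(d["start_offset"] * time_offset, 3),
            "end_time": round(d["end_offset"] * time_offset, 3),
        }
        for d in word_offsets
    ]
# offsets_to_seconds([{"word": "hello", "start_offset": 12, "end_offset": 28}])
# -> [{'word': 'hello', 'start_time': 0.24, 'end_time': 0.56}]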
| 56
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
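# Quick, weight-free check of the normalization contract in `preprocess` above:
# inputs are mapped into [-1, 1], so an all-white RGB image becomes a tensor of
# ones (a minimal sketch; the pipeline itself still needs pretrained modules).
if __name__ == "__main__":
    white = PIL.Image.new("RGB", (64, 64), (255, 255, 255))
    tensor = preprocess(white)
    print(tensor.shape, float(tensor.min()), float(tensor.max()))  # torch.Size([1, 3, 64, 64]) 1.0 1.0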
| 71
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int):
        return idx * 2

    def right(self, idx: int):
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int):
        """Assign `val` to every position in [a, b] (1-indexed), lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int):
        """Return the maximum over positions [a, b] (1-indexed)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
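# Illustrative cross-check of the lazy-propagation logic above against a plain
# list (a sketch, never called by the demo; it mirrors the range assignment
# performed in the __main__ block):
def _brute_force_check():
    data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    tree = SegmentTree(len(data))
    tree.build(1, 1, len(data), data)
    tree.update(1, 1, len(data), 1, 3, 111)  # assign 111 to positions 1..3
    data[0:3] = [111] * 3                    # same assignment on the raw list
    assert tree.query(1, 1, len(data), 1, 15) == max(data) == 111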
| 71
| 1
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson("exp(x) - 1", 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 326
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function inside a notebook, handling TPU / GPU / CPU setups."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function on several CPU processes, for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
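# Hedged usage sketch: the launched function must build its own Accelerator.
# The names below (training_loop, lr) are illustrative only.
# from accelerate import Accelerator
#
# def training_loop(lr):
#     accelerator = Accelerator()
#     accelerator.print(f"process {accelerator.process_index} sees lr={lr}")
#
# notebook_launcher(training_loop, args=(3e-4,), num_processes=2)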
| 614
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
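# Tiny self-contained check of the accuracy metric above (synthetic arrays,
# not real model output):
# >>> simple_accuracy(np.array([0, 2, 1, 1]), np.array([0, 2, 0, 1]))
# 0.75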
| 718
|
def text_justification(word: str, max_width: int) -> list:
    """
    Fully justify `word` (a sentence) into lines of exactly `max_width` characters.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            extra_spaces = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(extra_spaces):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
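# Example behavior of the justification above (full justify, last line
# left-aligned and padded to max_width):
# >>> text_justification("This is an example of text justification.", 16)
# ['This    is    an', 'example  of text', 'justification.  ']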
| 587
| 0
|
"""simple docstring"""
class UnionFind:
    """
    Union-find (disjoint set) with union by rank that also tracks the size of
    each set and the size of the largest set seen so far (`max_set`).
    """

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
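# Illustrative usage of UnionFind above: start from three singleton sets and
# merge them; `max_set` tracks the largest component size as unions happen.
# uf = UnionFind([1, 1, 1])
# uf.merge(0, 1)  # True; set sizes become [0, 2, 1]
# uf.merge(1, 2)  # True; set sizes become [0, 3, 0]
# uf.max_set      # 3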
| 573
|
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]):
    """
    Solves a1*x + b1*y = c1 and a2*x + b2*y = c2 via Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
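# Worked example for the solver above: the system 2x + 3y = 0 and 4x + 8y = 32
# has determinant 2*8 - 4*3 = 4, giving x = -96/4 = -24 and y = 64/4 = 16.
# >>> cramers_rule_2x2([2, 3, 0], [4, 8, 32])
# (-24.0, 16.0)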
| 573
| 1
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """
    Counts the 'triangle words' in words.txt: words whose letter values
    (A=1, ..., Z=26) sum to a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
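# Equivalent closed-form membership test (a sketch): t = n(n+1)/2 is triangular
# exactly when 8*t + 1 = (2n+1)^2 is a perfect square, so the precomputed table
# above is optional.
def is_triangular(t: int) -> bool:
    root = int((8 * t + 1) ** 0.5)
    return root * root == 8 * t + 1
# all(is_triangular(t) for t in TRIANGULAR_NUMBERS)  # -> True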
| 712
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
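# Minimal synthetic check of the training loop above (assumption: two linearly
# separable Gaussian blobs; far fewer iterations than the Iris run, for speed):
# rng = np.random.default_rng(0)
# x_demo = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(4, 1, (50, 2))])
# y_demo = np.hstack([np.zeros(50), np.ones(50)])
# w = logistic_reg(0.1, x_demo, y_demo, max_iterations=1_000)
# (sigmoid_function(x_demo @ w) > 0.5).astype(int)  # mostly matches y_demo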
| 538
| 0
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__SCREAMING_SNAKE_CASE : Any =logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node setups.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
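# Hypothetical environment for the multi-GPU branch above (two GPUs, one node).
# The variable names match what init_gpu_params reads; the CLI flags shown are
# an assumption about the calling script, not part of this module:
# WORLD_SIZE=2 N_GPU_NODE=2 RANK=0 N_NODES=1 NODE_RANK=0 \
#     python train.py --n_gpu 2 --local_rank 0
# which yields params.n_nodes == 1, params.node_id == 0, params.multi_gpu == True.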
| 428
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Top-down (memoized) Levenshtein edit distance between word1 and word2.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
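# Note on the memoization above: functools.cache keys on (index1, index2), so
# the recursion does O(len(word1) * len(word2)) work. Classic checks:
# min_distance_up_bottom("kitten", "sitting")       # -> 3
# min_distance_up_bottom("intention", "execution")  # -> 5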
| 428
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["ConditionalDetrFeatureExtractor"]
lowerCamelCase_ = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
lowerCamelCase_ , lowerCamelCase_ = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
lowerCamelCase_ = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
lowerCamelCase_ = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCamelCase_ = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 463
| 0
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A ,'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__A ,'num_attention_heads' ) )
self.parent.assertTrue(hasattr(__A ,'num_encoder_blocks' ) )
class A_ :
"""simple docstring"""
def __init__( self : List[Any] ,__A : List[str] ,__A : Tuple=13 ,__A : Tuple=64 ,__A : Union[str, Any]=3 ,__A : Dict=4 ,__A : Any=[2, 2, 2, 2] ,__A : int=[8, 4, 2, 1] ,__A : int=[16, 32, 64, 128] ,__A : List[Any]=[1, 4, 8, 16] ,__A : int=[1, 2, 4, 8] ,__A : Optional[int]=True ,__A : Any=True ,__A : int="gelu" ,__A : List[Any]=0.1 ,__A : List[str]=0.1 ,__A : List[Any]=0.02 ,__A : str=3 ,__A : List[str]=None ,) -> str:
_lowercase = parent
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = num_encoder_blocks
_lowercase = sr_ratios
_lowercase = depths
_lowercase = hidden_sizes
_lowercase = downsampling_rates
_lowercase = num_attention_heads
_lowercase = is_training
_lowercase = use_labels
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = num_labels
_lowercase = scope
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_lowercase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
return SegformerConfig(
image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __UpperCAmelCase ( self : Tuple ,__A : List[str] ,__A : Any ,__A : str ) -> Dict:
_lowercase = SegformerModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
_lowercase = _lowercase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __UpperCAmelCase ( self : Optional[Any] ,__A : int ,__A : Optional[int] ,__A : Optional[int] ) -> Union[str, Any]:
_lowercase = self.num_labels
_lowercase = SegformerForSemanticSegmentation(__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
_lowercase = model(__A ,labels=__A )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss ,0.0 )
def __UpperCAmelCase ( self : Any ,__A : List[Any] ,__A : str ,__A : Optional[Any] ) -> Optional[int]:
_lowercase = 1
_lowercase = SegformerForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
_lowercase = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(__A )
_lowercase = model(__A ,labels=__A )
self.parent.assertGreater(result.loss ,0.0 )
def __UpperCAmelCase ( self : str ) -> int:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : str = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
_lowercase = SegformerModelTester(self )
_lowercase = SegformerConfigTester(self ,config_class=__A )
def __UpperCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__A )
def __UpperCAmelCase ( self : List[str] ) -> Any:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__A )
@unittest.skip('SegFormer does not use inputs_embeds' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
for model_class in self.all_model_classes:
_lowercase = True
_lowercase = False
_lowercase = True
_lowercase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(**self._prepare_for_class(__A ,__A ) )
_lowercase = outputs.attentions
_lowercase = sum(self.model_tester.depths )
self.assertEqual(len(__A ) ,__A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase = True
_lowercase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(**self._prepare_for_class(__A ,__A ) )
_lowercase = outputs.attentions
self.assertEqual(len(__A ) ,__A )
# verify the first attentions (first block, first layer)
_lowercase = (self.model_tester.image_size // 4) ** 2
_lowercase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
_lowercase = (self.model_tester.image_size // 32) ** 2
_lowercase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
_lowercase = len(__A )
# Check attention is always last and order is fine
_lowercase = True
_lowercase = True
_lowercase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(**self._prepare_for_class(__A ,__A ) )
self.assertEqual(out_len + 1 ,len(__A ) )
_lowercase = outputs.attentions
self.assertEqual(len(__A ) ,__A )
# verify the first attentions (first block, first layer)
_lowercase = (self.model_tester.image_size // 4) ** 2
_lowercase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def __UpperCAmelCase ( self : Dict ) -> int:
def check_hidden_states_output(__A : Union[str, Any] ,__A : int ,__A : int ):
_lowercase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(**self._prepare_for_class(__A ,__A ) )
_lowercase = outputs.hidden_states
_lowercase = self.model_tester.num_encoder_blocks
self.assertEqual(len(__A ) ,__A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = True
check_hidden_states_output(__A ,__A ,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase = True
check_hidden_states_output(__A ,__A ,__A )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
if not self.model_tester.is_training:
return
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ):
continue
_lowercase = model_class(__A )
model.to(__A )
model.train()
_lowercase = self._prepare_for_class(__A ,__A ,return_labels=__A )
_lowercase = model(**__A ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
pass
@slow
def __UpperCAmelCase ( self : Tuple ) -> Any:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = SegformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
# only resize + normalize
_lowercase = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=__A ,align=__A ,do_random_crop=__A )
_lowercase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
__A )
_lowercase = prepare_img()
_lowercase = image_processor(images=__A ,return_tensors='pt' )
_lowercase = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
_lowercase = model(__A )
_lowercase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,__A )
_lowercase = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,__A ,atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self : str ) -> Any:
# only resize + normalize
_lowercase = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=__A ,align=__A ,do_random_crop=__A )
_lowercase = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(__A )
_lowercase = prepare_img()
_lowercase = image_processor(images=__A ,return_tensors='pt' )
_lowercase = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
_lowercase = model(__A )
_lowercase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,__A )
_lowercase = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,__A ,atol=1e-1 ) )
@slow
def __UpperCAmelCase ( self : str ) -> Any:
# only resize + normalize
_lowercase = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=__A ,align=__A ,do_random_crop=__A )
_lowercase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
__A )
_lowercase = prepare_img()
_lowercase = image_processor(images=__A ,return_tensors='pt' )
_lowercase = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
_lowercase = model(__A )
_lowercase = outputs.logits.detach().cpu()
_lowercase = image_processor.post_process_semantic_segmentation(outputs=__A ,target_sizes=[(500, 300)] )
_lowercase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,__A )
_lowercase = image_processor.post_process_semantic_segmentation(outputs=__A )
_lowercase = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape ,__A )
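# Note on the attention-shape assertions in the tests above: SegFormer's
# efficient attention keeps the full query sequence but downsamples keys/values
# by `sr_ratio`. For the tester defaults (image_size=64, sr_ratios=[8, 4, 2, 1]):
# stage 1: expected_seq_len = (64 // 4) ** 2 = 256 query positions,
#          expected_reduced_seq_len = (64 // (4 * 8)) ** 2 = 4 key/value positions
# stage 4: (64 // 32) ** 2 = 4 queries, (64 // (32 * 1)) ** 2 = 4 keys/values.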
| 67
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(self.block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
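
# Illustrative sketch (not part of the original module): how `block_sizes` and
# `block_repeats` interact with the derived `num_hidden_layers` property above.
def _example_funnel_config():
    config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 2, 2])
    # each block contributes block_sizes[i] layers, so the total hidden-layer
    # count is simply their sum, independent of block_repeats
    assert config.num_blocks == 3
    assert config.num_hidden_layers == 12
    return config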
| 341
| 0
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
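
# Illustrative sketch (not part of the original module): how the "@@ "
# continuation marker is collapsed during detokenization, exactly as
# convert_tokens_to_string does above. The token list is hand-built rather
# than produced from a real vocabulary file.
def _example_bpe_detokenization():
    tokens = ["wa@@", "nt", "to", "runn@@", "ing"]
    string = " ".join(tokens)            # "wa@@ nt to runn@@ ing"
    # "@@ " marks a subword that continues into the next token, so removing it
    # glues the pieces back together
    return "".join(string.split("@@ "))  # "want to running"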
| 715
|
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
lowerCAmelCase__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
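
# What the demo computes (illustrative comment, not from the original script):
# Floyd-Warshall runs in O(n^3) and fills dp with all-pairs shortest distances.
# For the edges above:
#   dist(1 -> 4) = 5 + 6 = 11   via 1 -> 3 -> 4
#   dist(0 -> 3) = 9 + 7 = 16   via 0 -> 2 -> 3
# Note that dp[i][i] stays math.inf here unless a self-loop is added
# explicitly, because __init__ never seeds the diagonal with 0.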
| 471
| 0
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def get_config(self) -> RealmConfig:
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self) -> Dataset:
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self) -> np.ndarray:
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self) -> RealmRetriever:
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 392
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case__ : Tuple = pd.read_csv('''sample_data.csv''', header=None)
snake_case__ : List[str] = df.shape[:1][0]
# If you're using some other dataset input the target column
snake_case__ : Dict = df.iloc[:, 1:2]
snake_case__ : List[str] = actual_data.values.reshape(len_data, 1)
snake_case__ : Union[str, Any] = MinMaxScaler().fit_transform(actual_data)
snake_case__ : Tuple = 1_0
snake_case__ : str = 5
snake_case__ : Any = 2_0
snake_case__ : Union[str, Any] = len_data - periods * look_back
snake_case__ : Union[str, Any] = actual_data[:division]
snake_case__ : Optional[Any] = actual_data[division - look_back :]
snake_case__ , snake_case__ : Dict = [], []
snake_case__ , snake_case__ : Dict = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case__ : int = np.array(train_x)
snake_case__ : List[str] = np.array(test_x)
snake_case__ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case__ : int = np.array([list(i.ravel()) for i in test_y])
snake_case__ : List[Any] = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
snake_case__ : List[str] = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
snake_case__ : Optional[int] = model.predict(x_test)
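
# Shape sketch (illustrative comment, not from the original script): with
# look_back=10 and forward_days=5, every training sample is a (10, 1) window
# of scaled values and every target is the next 5 values flattened to (5,):
#   x_train.shape == (num_windows, 10, 1)
#   y_train.shape == (num_windows, 5)
# The final Dense(forward_days) layer therefore predicts all 5 future steps
# of one window in a single forward pass.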
| 392
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
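
# Illustrative sketch (not part of the original module): the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiplies out to
# 5 * 2**6 == 320, i.e. one logit frame per 320 raw audio samples, or 20 ms
# of audio at a 16 kHz sampling rate.
def _example_inputs_to_logits_ratio():
    config = SEWConfig()
    assert config.inputs_to_logits_ratio == 320
    return config.inputs_to_logits_ratio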
| 480
|
"""simple docstring"""
def valid_coloring(neighbours, colored_vertices, color):
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
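
# Illustrative usage (not from the original file): 2-color a 4-cycle given as
# an adjacency matrix. An even-length cycle is bipartite, so two colors
# suffice and the backtracking search finds the alternating assignment.
def _example_color_usage():
    graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    assert color(graph, 2) == [0, 1, 0, 1]
    return color(graph, 2)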
| 480
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 436
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
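
# Padding sketch (illustrative comment, not part of the original script): the
# preprocessing step appends a 0x80 byte, then enough zero bytes to reach
# 56 mod 64, then the original bit length as a big-endian 64-bit integer, so
# every preprocessed message is a whole number of 64-byte blocks:
#   len(SHA256.preprocessing(b"abc")) == 64
#   len(SHA256.preprocessing(b"a" * 56)) == 128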
| 436
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 702
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
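
# Illustrative usage sketch (not part of the original module), assuming the
# public "facebook/DiT-XL-2-256" checkpoint is available; the scheduler swap
# to DPMSolverMultistepScheduler is a common speed optimization, not a
# requirement of the pipeline.
def _example_dit_pipeline_usage():
    import torch
    from diffusers import DiTPipeline, DPMSolverMultistepScheduler

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    # map human-readable ImageNet names to class ids, then sample
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    images = pipe(class_labels=class_ids, num_inference_steps=25).images
    return images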
| 620
| 0
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
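
# Denoising-loop sketch (illustrative comment, not part of the test file):
# every test above exercises the same three-call contract of the scheduler,
# which is exactly the loop a pipeline runs at inference time:
#
#   scheduler.set_timesteps(num_inference_steps)        # pick the sigma grid
#   sample = scheduler.scale_model_input(sample, t)     # pre-condition input
#   sample = scheduler.step(model_output, t, sample).prev_sample  # one Euler step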
| 238
|
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
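
# Illustrative usage (not from the original file): {2, 5} sums to 7, but no
# subset of [2, 3, 5] reaches 11 since the whole array only totals 10.
def _example_is_sum_subset():
    assert is_sum_subset([2, 3, 5], 7)
    assert not is_sum_subset([2, 3, 5], 11)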
| 694
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
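
# Example invocation (illustrative comment, not from the original script; the
# script filename below is an assumption):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
#
# The script pulls the timm checkpoint, remaps its keys onto the HF DeiT
# layout, sanity-checks the logits against timm, then writes the converted
# model and image processor to the output folder.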
| 720
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
def UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] =''' \tHeLLo!how \n Are yoU? '''
        # fmt: off
        snake_case__ : Tuple =['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
snake_case__ : Tuple =DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str =DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] =self.get_tokenizer()
snake_case__ : Tuple =self.get_rust_tokenizer()
snake_case__ : int ='''I was born in 92000, and this is falsé.'''
snake_case__ : int =tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
snake_case__ : List[str] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =self.get_rust_tokenizer()
snake_case__ : Dict =tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple ='''This is a test'''
snake_case__ : List[str] =[13, 1, 4398, 25, 21, 1289]
snake_case__ : Union[str, Any] =['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
snake_case__ : Any =['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
snake_case__ : Union[str, Any] =DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple =DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[str] =tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : int =tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str =rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] =rust_tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# fmt: off
snake_case__ : Union[str, Any] ='''I was born in 92000, and this is falsé.'''
snake_case__ : Union[str, Any] =[13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case__ : List[str] =['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
snake_case__ : int =['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
snake_case__ : Union[str, Any] =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict =tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict =tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple =rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str =rust_tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        tokenizer = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE )
        text = tokenizer.encode('''sequence builders''' )
        text_a = tokenizer.encode('''multi-sequence build''' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Any ={'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
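# Hedged usage sketch for the tokenizer under test (not part of the suite); it assumes
# Hub access to the checkpoint referenced in the integration test above:
#   from transformers import DebertaV2Tokenizer
#   tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   ids = tokenizer.encode("I was born in 92000, and this is falsé.", add_special_tokens=False)
#   tokenizer.convert_ids_to_tokens(ids)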
| 408
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only record leaf modules with learnable parameters (convs and batch norms)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self
    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
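# Minimal usage sketch for Tracker/ModuleTransfer above (illustrative only; the two
# sequential modules are hypothetical stand-ins for the timm and HF ResNets):
#
#   src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 32, 32))
#   # afterwards, dest's conv and batch-norm weights match src's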
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
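# Usage sketch (assumes timm can download the pretrained ResNet weights; the script
# filename below is illustrative — invoke whatever name this module is saved under):
#   python convert_timm_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted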
| 163
|
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """
    Return the sum of the proper divisors of ``input_num``.

    >>> sum_of_divisors(28)
    28
    >>> sum_of_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
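# Worked example: 28 is a perfect number, since its proper divisors 1, 2, 4, 7 and 14
# sum back to 28, which is exactly what sum_of_divisors(28) above returns.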
| 575
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
__magic_name__ =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
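# Usage sketch (assumes network access for the timm weights and the COCO test image;
# the script filename is illustrative):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base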
| 710
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ =datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "arrow" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
'''simple docstring'''
self._validate_cache_dir()
UpperCamelCase__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = not is_remote_filesystem(self._fs )
UpperCamelCase__ = os.path.join if is_local else posixpath.join
UpperCamelCase__ = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCamelCase__ = F"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
UpperCamelCase__ = path_join(self._output_dir , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = []
UpperCamelCase__ = []
for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = total_num_examples
UpperCamelCase__ = total_num_bytes
# should rename everything at the end
logger.debug(F"Renaming {total_shards} shards." )
if total_shards > 1:
UpperCamelCase__ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase__ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
rename(
SCREAMING_SNAKE_CASE_ , fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace('''TTTTT-SSSSS''' , F"{global_shard_id:05d}" ).replace('''NNNNN''' , F"{total_shards:05d}" ) , )
UpperCamelCase__ = []
UpperCamelCase__ = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase__ , UpperCamelCase__ = task_id_and_num_shards[i]
for shard_id in range(SCREAMING_SNAKE_CASE_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ).map(lambda SCREAMING_SNAKE_CASE_ : _rename_shard(*SCREAMING_SNAKE_CASE_ ) ).collect()
else:
# don't use any pattern
UpperCamelCase__ = 0
UpperCamelCase__ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace(SCREAMING_SNAKE_CASE_ , '''''' ) , )
def _a (self , SCREAMING_SNAKE_CASE_ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
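# Hedged usage sketch: `Dataset.from_spark` is the public entry point in `datasets`
# that drives this builder. Assumes a live SparkSession named `spark`:
#   df = spark.createDataFrame([("a",), ("b",)], ["text"])
#   ds = datasets.Dataset.from_spark(df)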
| 469
| 0
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
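# For reference, a recursive 0/1 knapsack consistent with the calls above
# (k.knapsack(cap, w, val, c)); a sketch, not the imported `knapsack` module itself:
#
#   def knapsack(cap, w, val, c):
#       if c == 0 or cap == 0:
#           return 0
#       if w[c - 1] > cap:
#           return knapsack(cap, w, val, c - 1)
#       return max(
#           val[c - 1] + knapsack(cap - w[c - 1], w, val, c - 1),
#           knapsack(cap, w, val, c - 1),
#       )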
| 259
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
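# Quick illustration of trim_batch (not from the original file): columns holding only
# the pad token id are dropped, e.g. with pad_token_id=0:
#   trim_batch(torch.tensor([[5, 2, 0, 0], [6, 7, 8, 0]]), 0)
#   -> tensor([[5, 2, 0], [6, 7, 8]])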
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
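# Worked example for f1_score above: prediction "the cat sat" and reference
# "cat sat down" normalize to ["cat", "sat"] and ["cat", "sat", "down"], so
# precision = 2/2, recall = 2/3 and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.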
| 259
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase ="Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _A ( ):
"""simple docstring"""
A = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
A = get_sagemaker_input()
else:
A = get_cluster_input()
return config
def _A ( _a : Any=None ):
"""simple docstring"""
if subparsers is not None:
A = subparsers.add_parser("""config""" , description=_a )
else:
A = argparse.ArgumentParser("""Accelerate config command""" , description=_a )
parser.add_argument(
"""--config_file""" , default=_a , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=_a )
return parser
def _A ( _a : Optional[Any] ):
"""simple docstring"""
A = get_user_input()
if args.config_file is not None:
A = args.config_file
else:
if not os.path.isdir(_a ):
os.makedirs(_a )
A = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(_a )
else:
config.to_yaml_file(_a )
print(f'accelerate configuration saved at {config_file}' )
def _A ( ):
"""simple docstring"""
A = config_command_parser()
A = parser.parse_args()
config_command(_a )
if __name__ == "__main__":
main()
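# Usage sketch: running `accelerate config` (or `python -m accelerate.commands.config`)
# walks through the prompts and writes default_config.yaml; pass --config_file to
# choose a different destination.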
| 255
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    '''simple docstring'''

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embedding_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embedding_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(additional_embeds, dim=1)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
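# Standalone sketch (added for illustration, not part of the original file) of the
# additive causal mask registered in __init__ above: -10000.0 strictly above the
# diagonal blocks attention to future positions once the mask is added to the
# attention scores. `seq_len = 4` is an arbitrary illustrative size; the model uses
# num_embeddings + additional_embeddings.
#
#     import torch
#
#     seq_len = 4
#     mask = torch.full([seq_len, seq_len], -10000.0)
#     mask.triu_(1)  # zeros on and below the diagonal, -10000.0 strictly above it
#     scores = torch.randn(1, seq_len, seq_len)
#     attn = torch.softmax(scores + mask[None, ...], dim=-1)
#     print(attn[0, 0])  # the first position can only attend to itself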
| 255
| 1
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    '''simple docstring'''

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f'Benchmark when {number = }:')
        print(f'{get_set_bits_count_using_modulo_operator(number) = }')
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f'timeit() runs in {timing} seconds')
        print(f'{get_set_bits_count_using_brian_kernighans_algorithm(number) = }')
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f'timeit() runs in {timing} seconds')

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
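    # Sanity check (added for illustration): both counters rely on the fact that
    # `n & (n - 1)` clears the lowest set bit, and must agree with Python's own
    # binary representation.
    for n in (0, 1, 25, 37, 58, 255):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")
    print("all popcount checks passed")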
| 400
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
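# Illustration (added, not part of the original file) of the templates the last
# two methods produce. With bos=0 and eos=2 (the usual facebook/bart-base ids,
# stated here as an assumption):
#
#     single sequence: [0] + A + [2]                   -> <s> A </s>
#     sequence pair:   [0] + A + [2] + [2] + B + [2]   -> <s> A </s></s> B </s>
#
# create_token_type_ids_from_sequences returns all zeros in both cases, because
# BART does not use token type ids.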
| 400
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
'''simple docstring'''
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        '''simple docstring'''
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table):
        '''simple docstring'''
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        '''simple docstring'''
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
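# Self-contained sketch (added for illustration) of the chunked read performed by
# _generate_tables above; the file name and chunk size are illustrative values,
# not from the original:
#
#     import pandas as pd
#     import pyarrow as pa
#
#     with open("tiny.csv", "w") as f:
#         f.write("a,b\n1,x\n2,y\n3,z\n")
#
#     for batch_idx, df in enumerate(pd.read_csv("tiny.csv", iterator=True, chunksize=2)):
#         pa_table = pa.Table.from_pandas(df)
#         print(batch_idx, pa_table.num_rows, pa_table.column_names)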
| 716
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums, left, right) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError('''find_max() arg is an empty sequence''')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('''list index out of range''')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
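    # Quick usage sketch (added for illustration): `left` and `right` are
    # inclusive indices into `nums`.
    nums = [3, 1, 4, 1, 5, 9, 2, 6]
    print(find_max(nums, 0, len(nums) - 1))  # 9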
| 176
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", 'beit.embeddings.cls_token'),
(f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""")
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""")
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""")
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""")
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
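# Minimal sketch (added for illustration) of the pop-and-reinsert renaming that
# rename_key performs on the checkpoint's state_dict; the key names here are toy
# values, not actual checkpoint keys:
#
#     import torch
#
#     state_dict = {"blocks.0.norm1.weight": torch.ones(3)}
#     for src, dest in [("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")]:
#         state_dict[dest] = state_dict.pop(src)
#     print(list(state_dict))  # ['beit.encoder.layer.0.layernorm_before.weight']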
| 687
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """simple docstring"""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
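# Toy illustration (added, not part of the original file) of the padding done by
# batch_sequences above; pad id 0 is an arbitrary choice for this sketch:
#
#     import torch
#
#     batch = [[5, 6, 7], [5, 6], [5]]
#     pad_idx = 0
#     max_seq_len_ = max(len(t) for t in batch)
#     tk_t = torch.tensor([t + [pad_idx] * (max_seq_len_ - len(t)) for t in batch])  # (bs, max_seq_len_)
#     lg_t = torch.tensor([len(t) for t in batch])  # (bs)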
| 408
| 0
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """simple docstring"""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
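    # Illustrative check of the sieve itself (added, not in the original):
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]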
| 336
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == '''fp8'''), )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])

    metric = evaluate.load('''glue''', '''mrpc''')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
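# Arithmetic behind the gradient-accumulation branch above (added note): a
# requested batch size of 64 with MAX_GPU_BATCH_SIZE = 16 becomes 4 accumulation
# steps of 16 samples each, so the effective batch size seen by the optimizer is
# unchanged: 64 // 16 = 4 steps, and 16 * 4 == 64. The loss is divided by the
# step count so the accumulated gradient matches a single large batch.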
| 336
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.')
| 81
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = VQModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self, sizes=(32, 32)):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def output_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        """simple docstring"""
        pass

    def test_training(self):
        """simple docstring"""
        pass

    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['''missing_keys''']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """simple docstring"""
        model = VQModel.from_pretrained('''fusing/vqgan-dummy''')
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
| 271
| 0
|
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 700
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = F" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        '''simple docstring'''
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        '''simple docstring'''
        super().test_tokenization_python_rust_equals()

    # overwrite common test
    def test_added_tokens_do_lower_case(self):
        '''simple docstring'''
        # CLIP always lower cases letters
        pass
| 251
| 0
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 73
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73
| 1
|
"""simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = """https://api.openweathermap.org/data/2.5/"""


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + """weather""", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + """forecast""", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + """onecall""", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
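# Note (added): `params=locals()` works above only because the parameter names
# (`q`, `appid`, `lat`, `lon`) are exactly the query-string keys the
# OpenWeatherMap API expects. An equivalent request with explicit parameters:
#
#     def current_weather_explicit(q: str = "Chicago", appid: str = APPID) -> dict:
#         return requests.get(URL_BASE + "weather", params={"q": q, "appid": appid}).json()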
| 712
|
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Union[str, Any]=False ):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase = hidden[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCAmelCase = labels != -100
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
_UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
_UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
_UpperCAmelCase = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] ):
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase = logprob_i
return out
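# A minimal usage sketch (vocabulary size, cutoffs, and shapes are illustrative):
# with the default div_val=1 and d_proj == d_embed, all projections are None and
# this runs as written.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=512, d_proj=512, cutoffs=[2_000, 6_000])
    hidden = torch.randn(4, 32, 512)            # (batch, seq_len, d_proj) from a decoder
    labels = torch.randint(0, 10_000, (4, 32))  # next-token target ids
    nll = crit(hidden, labels)                  # per-token negative log-likelihood, shape (4 * 31,)
    print(nll.mean())                           # scalar loss; rare tokens were scored via a tail cluster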
| 275
| 0
|