| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, length 82 to 53.2k | int64, 0 to 721 | string, length 91 to 41.9k | int64, 0 to 699 | int64, 0 to 1 |
def combination_util(arr, data, n, index, r, i):
    # A combination of length r is ready: print it
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, data, n, index + 1, r, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, data, n, index, r, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, data, n, 0, r, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
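    # Illustrative cross-check (an addition, not part of the original snippet):
    # for a sorted input, the recursion above enumerates exactly the subsets
    # that itertools.combinations yields, here C(5, 3) = 10 of them.
    from itertools import combinations

    assert len(list(combinations(arr, 3))) == 10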
| 287
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
)
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
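# Usage sketch (an added note, not part of the original file): thanks to the
# guarded imports above, callers can import pipelines unconditionally; when
# torch/transformers are missing, the dummy objects imported in the except
# branches raise an informative error only at instantiation time.
#
#     from diffusers import StableDiffusionPipeline
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")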
| 287
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        # The script path appears twice on purpose: the first occurrence stands
        # in for sys.argv[0]; xla_spawn then parses --num_cores and the
        # training script with its arguments from the rest.
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 501
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # Map each choice to its string representation so argparse can match user
    # input back to the original (possibly non-string) value.
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
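# Minimal usage sketch (an illustrative addition; `ExampleArguments` is a
# hypothetical dataclass, not part of the original module): HfArgumentParser
# turns dataclass fields into argparse arguments and parses back into instances.
@dataclasses.dataclass
class ExampleArguments:
    learning_rate: float = dataclasses.field(default=1e-4, metadata={"help": "Optimizer step size."})
    do_eval: bool = False


example_parser = HfArgumentParser(ExampleArguments)
(example_args,) = example_parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--do_eval"])
assert example_args.learning_rate == 3e-5 and example_args.do_eval is True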
| 501
| 1
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors using NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors without NumPy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Time both implementations on a small example."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
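    # Agreement check (illustrative addition, not part of the original): the
    # NumPy and pure-Python implementations should match up to float rounding;
    # both equal sqrt(27), about 5.196152, for these inputs.
    import math

    assert math.isclose(euclidean_distance([1, 2, 3], [4, 5, 6]), euclidean_distance_no_np([1, 2, 3], [4, 5, 6]))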
| 257
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the denominator d (numerator <= d <= digit) for which the decimal
    expansion of numerator/d has the longest recurring cycle.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # The remainder repeats, so the recurring cycle is complete.
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
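    # Context note (added): this is the classic Project Euler problem 26 search.
    # A long-division remainder repeating marks the end of a recurring cycle,
    # so tracking seen remainders bounds the cycle length; with the default
    # digit=1000 the answer is 983. A lighter illustrative call:
    print(f"solution(1, 100) = {solution(1, 100)}")  # 97, a full reptend prime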
| 257
| 1
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 492
|
"""simple docstring"""
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = abs(lowercase__ )
_lowerCamelCase : Optional[int] = 0
while n > 0:
res += n % 10
n //= 10
return res
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = abs(lowercase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _snake_case ( lowercase__ ):
return sum(int(lowercase__ ) for c in str(abs(lowercase__ ) ) )
def _snake_case ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase__ , lowercase__ ) -> None:
_lowerCamelCase : int = f'''{func.__name__}({value})'''
_lowerCamelCase : Optional[Any] = timeit(f'''__main__.{call}''' , setup='import __main__' )
print(f'''{call:56} = {func(lowercase__ )} -- {timing:.4f} seconds''' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(lowercase__ , lowercase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
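    # Illustrative cross-check (an addition): all three implementations agree,
    # e.g. 2 + 6 + 2 + 1 + 4 + 4 = 19 for 262144.
    assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19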
| 492
| 1
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
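    # Example for summarization (an added illustration, mirroring the MT usage above;
    # all flags are defined in run_generate):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization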
| 661
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    audio_file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(audio_file)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 661
| 1
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 202
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["YolosFeatureExtractor"]
lowerCamelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
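# Note (added): with the registration above, `import transformers.models.yolos`
# stays cheap; `_LazyModule` resolves the names listed in `_import_structure`
# only on first attribute access, so `from transformers.models.yolos import
# YolosModel` defers the heavy torch import until that lookup happens.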
| 202
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
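# Hedged example (added, not in the original file): `trim_batch` drops columns that
# are padding in every row. With pad_token_id=0:
#
#     >>> import torch
#     >>> trim_batch(torch.tensor([[5, 6, 0], [7, 0, 0]]), pad_token_id=0)
#     tensor([[5, 6],
#             [7, 0]])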
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
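# Hedged usage sketch (added): the data-directory layout ({type_path}.source /
# {type_path}.target) and the checkpoint name are assumptions for illustration.
#
#     from torch.utils.data import DataLoader
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     dataset = Seq2SeqDataset(tokenizer, "path/to/data", max_source_length=128, max_target_length=32, type_path="val")
#     loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)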
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
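# Hedged example (added): articles and punctuation are stripped before the
# token-level F1 is computed, so these two strings match exactly:
#
#     >>> f1_score("The cat sat.", "a cat sat")
#     1.0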
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 51
|
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
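# Hedged example (added): with 100 V at 0 degrees and 5 A at 0 degrees, the apparent
# power is purely real:
#
#     >>> apparent_power(100, 5, 0, 0)
#     (500+0j)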
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 0
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_UpperCAmelCase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_UpperCAmelCase = """path-to-your-trained-model"""
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_UpperCAmelCase = pipe.to(device)
# to channels last
_UpperCAmelCase = pipe.unet.to(memory_format=torch.channels_last)
_UpperCAmelCase = pipe.vae.to(memory_format=torch.channels_last)
_UpperCAmelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_UpperCAmelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_UpperCAmelCase = torch.randn(2, 4, 6_4, 6_4)
_UpperCAmelCase = torch.rand(1) * 9_9_9
_UpperCAmelCase = torch.randn(2, 7_7, 7_6_8)
_UpperCAmelCase = (sample, timestep, encoder_hidden_status)
try:
_UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCAmelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCAmelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_UpperCAmelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_UpperCAmelCase = 6_6_6
_UpperCAmelCase = torch.Generator(device).manual_seed(seed)
_UpperCAmelCase = {"""generator""": generator}
if args.steps is not None:
_UpperCAmelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_UpperCAmelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 708
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
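# Hedged check (added): the sieve leaves primes marked 0, so for n=10 the function
# sums 2 + 3 + 5 + 7:
#
#     >>> solution(10)
#     17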
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Pipeline for generating text for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        """
        Assign labels to the image(s) passed as inputs.
        """
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `input_ids = None` in `preprocess` when `prompt` is None. In batch mode, the pipeline
        # groups them into a list of `None`, which fails `generate`. Normalize that back to a single `None` here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
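# Hedged usage sketch (added, not part of the original file): the checkpoint name and
# image URL are assumptions for illustration.
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#     print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))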
| 4
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 4
| 1
|
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 455
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Dict = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase ,model_name="Helsinki-NLP/opus-mt-en-de" ,revision="1a8c2263da11e68e50938f97e10cd57820bd504c" ,decode_kwargs={"use_source_tokenizer": True} ,)
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 455
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ = "src/diffusers"
UpperCAmelCase__ = "."
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase__ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase__ = spec.loader.load_module()
def _a ( a :str , a :List[str] ) -> List[Any]:
return line.startswith(a ) or len(a ) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , a ) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    # `PY37` is an assumption for the mangled target-version constant in the original.
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 117
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCAmelCase__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    """
    Construct an NLLB tokenizer, backed by SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : Union[str, Any]="</s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : List[Any]="<s>" , __UpperCAmelCase : Optional[int]="<unk>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : Tuple="<mask>" , __UpperCAmelCase : Any=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[Dict[str, Any]] = None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : int=False , **__UpperCAmelCase : Dict , ) ->List[str]:
"""simple docstring"""
a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a = {} if sp_model_kwargs is None else sp_model_kwargs
a = legacy_behaviour
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__UpperCAmelCase , **__UpperCAmelCase , )
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
a = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a = 1
a = len(self.sp_model )
a = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase )
}
a = {v: k for k, v in self.lang_code_to_id.items()}
a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a = src_lang if src_lang is not None else '''eng_Latn'''
a = self.lang_code_to_id[self._src_lang]
a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
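    # Hedged usage sketch (added, not part of the original file): language codes follow
    # the FLORES-200 convention used by NLLB.
    #
    #     tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    #     inputs = tokenizer("Hello world", return_tensors="pt")  # ids are prefixed with the eng_Latn code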
| 117
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input:
        - **text_input**: String to tokenize
        - **return_ids**: Whether tokens should also be converted to their integer ids
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text:
        - **tokens_ids**: List of tokens ids
        - **skip_special_tokens**: Whether special tokens should be dropped from the output
        - **cleanup_tokenization_spaces**: Whether leading/trailing/intermediate spaces should be cleaned up
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the pipeline on **inputs**; an empty input short-circuits to an empty result."""
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
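# Added usage sketch (not from the original file): exercises the routes defined above,
# assuming a server was started with `transformers-cli serve --task sentiment-analysis`
# on the default host/port registered in register_subcommand.
if __name__ == "__main__":
    import requests

    base = "http://localhost:8888"
    print(requests.get(base + "/").json())  # ServeModelInfoResult: the model config as a dict
    payload = {"text_input": "Hello world", "return_ids": True}
    print(requests.post(base + "/tokenize", json=payload).json())  # tokens and tokens_ids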
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
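# Added note (not from the original file): zenquotes responses are lists with a single
# record; per the public API docs the quote text is under "q" and the author under "a",
# e.g. [{"q": "...", "a": "Mahatma Gandhi", ...}]. Treat the field names as an assumption.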
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
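# Added note (not from the original file): the @slow-marked tests above download
# "distilbert-base-uncased" and, following the standard transformers test convention,
# only run when RUN_SLOW=1 is set in the environment; the remaining tests run with
# plain pytest.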
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling.
        Logs an error if used while not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
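# Added illustration (not from the original file): a self-contained sketch of the
# global_attention_mask padding rule implemented in _pad above. `0` marks local attention
# and `1` marks global attention, which is why padding uses `-1` rather than `0`.
if __name__ == "__main__":
    encoded = {"input_ids": [0, 8, 9, 2, 1, 1], "global_attention_mask": [1, 0, 0, 0]}
    difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
    encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
    print(encoded["global_attention_mask"])  # [1, 0, 0, 0, -1, -1]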
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
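# Added note (not from the original file): the offline tests above patch
# datasets.config.HF_DATASETS_OFFLINE to True, so every network helper (http/ftp/fsspec)
# is expected to raise OfflineModeIsEnabled before any request is attempted.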
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
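# Added usage sketch (not from the original file): a typical round trip through the
# processor. The checkpoint name and the 16 kHz sampling rate are assumptions, not
# pinned by this file.
if __name__ == "__main__":
    import numpy as np

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=audio, sampling_rate=16000, text="hello")
    print(inputs.keys())  # input_features from the audio branch, labels from the text branch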
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Find the 1-based line number whose pair (a, x) maximizes a**x, compared via x * log10(a)."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
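# Added note (not from the original file): comparing x * log10(a) instead of a**x keeps
# the search in floating point and avoids materializing numbers with millions of digits;
# since log10 is monotonic, a1**x1 > a2**x2 exactly when x1*log10(a1) > x2*log10(a2),
# e.g. (2 * log10(3) > 3 * log10(2)) == (3**2 > 2**3) == True.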
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy and spacy installed."""

    pass
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
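# Added usage sketch (not from the original file): constructing the config directly and
# round-tripping it through a dict, the standard PretrainedConfig workflow.
if __name__ == "__main__":
    config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
    restored = AlbertConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 768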
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted key one attribute at a time to reach the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
A = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
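# Added usage note (not from the original file): a typical invocation for a pretrained
# (not fine-tuned) fairseq checkpoint, with the script filename assumed:
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path ckpt.pt --pytorch_dump_folder_path ./hf_model --not_finetuned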
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
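# Added usage sketch (not from the original file): the checkpoint id is an assumption;
# any repo that bundles a VQModel, a UNet2DModel and a compatible scheduler in this
# layout should work the same way.
if __name__ == "__main__":
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipe(batch_size=1, num_inference_steps=50).images[0]
    image.save("ldm_sample.png")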
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in their feature-specific counterparts.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
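# Added note (not from the original file): the resume tests above depend on checkpoints
# written by the earlier checkpointing tests into the shared class-level tmpdir
# (clean_on_exit = False), so this test class is order-sensitive by design.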
| 714
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp algorithm for finding a pattern within a piece of text,
    using a rolling hash so each window comparison is O(1) on average.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
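# Added illustration (not from the original file): one rolling-hash update in isolation.
# Dropping the leading character and appending the next one costs O(1), which is what
# keeps the window scan in rabin_karp linear.
if __name__ == "__main__":
    window, nxt = "abc", "d"
    h = 0
    for ch in window:
        h = (ord(ch) + h * alphabet_size) % modulus
    power = pow(alphabet_size, len(window) - 1, modulus)
    h = ((h - ord(window[0]) * power) * alphabet_size + ord(nxt)) % modulus
    check = 0
    for ch in "bcd":
        check = (ord(ch) + check * alphabet_size) % modulus
    assert h == check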
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , A__ : UNetaDConditionModel , A__ : DDPMScheduler , A__ : VQModel , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=A__ , scheduler=A__ , movq=A__ , )
a__ : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[Any] , A__ : List[str] , A__ : Optional[Any] , A__ : Dict , A__ : Dict , A__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if latents is None:
a__ : List[str] = randn_tensor(A__ , generator=A__ , device=A__ , dtype=A__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
a__ : int = latents.to(A__ )
a__ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int=0 ) -> str:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a__ : Union[str, Any] = torch.device(F'cuda:{gpu_id}' )
a__ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ , A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
a__ : int = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a__ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
        _, hook = cpu_offload_with_hook(self.movq, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's
        module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
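# Usage sketch for the __call__ above (assumes the surrounding Kandinsky 2.2
# ControlNet pipeline class, a prior pipeline that produces the image
# embeddings, and a prepared hint tensor such as a depth map; illustrative):
#   pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   image_embeds, negative_image_embeds = pipe_prior("A robot, 4k photo").to_tuple()
#   image = pipe(
#       image_embeds=image_embeds,
#       negative_image_embeds=negative_image_embeds,
#       hint=hint,
#       num_inference_steps=50,
#   ).images[0]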
| 688
| 1
|
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the `datasets` package and write it out
    as <split>.source / <split>.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
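# Usage sketch (file name is whatever this script is saved as; fire maps CLI
# flags onto the function's keyword arguments):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en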
| 234
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
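# Illustrative sanity check (the values are the defaults above, so this only
# demonstrates the per-stage list convention):
#   config = CvtConfig()
#   assert config.model_type == "cvt"
#   assert len(config.embed_dim) == len(config.depth) == 3  # one entry per stage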
| 234
| 1
|
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
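# Short usage sketch for the re-exported feature types (illustrative; follows
# the public `datasets` API):
#   from datasets import Dataset
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
#   ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]}, features=features)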
| 158
|
def is_isogram(string: str) -> bool:
    """
    An isogram is a word in which no letter is repeated.

    >>> is_isogram("Uncopyrightable")
    True
    >>> is_isogram("allowance")
    False
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 358
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
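# Usage sketch for the tokenizer above (needs network access to fetch the
# checkpoint; illustrative):
#   tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids end with [</s>, en_XX] because set_src_lang_special_tokens puts
#   # the language code in suffix_tokens rather than as a prefix.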
| 718
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """
    >>> SSL = SortedLinkedList
    >>> merged = merge_lists(SSL(test_data_odd), SSL(test_data_even))
    >>> len(merged)
    16
    >>> str(merged)
    '-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10'
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 11
| 0
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250_004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
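# Usage sketch (paths are placeholders; requires fairseq plus a fine-tuned
# wav2vec2 + mBART-50 fairseq checkpoint):
#   python convert_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50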
| 554
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
| 439
| 0
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products, one per unique partition of number_to_partition
    into primes (each partition is identified by the product of its parts).
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in
    more than number_unique_partitions unique ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
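# Worked example (illustrative): the prime partitions of 7 are {7}, {5, 2} and
# {3, 2, 2}; mapping each multiset to the product of its parts gives
# partition(7) == {7, 10, 12}, so 7 can be written as a sum of primes in
# len(partition(7)) == 3 unique ways. solution() returns the first n where
# that count exceeds 5000.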
| 437
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 437
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    This wraps the HuggingFace CLIP processor so that gradients can flow through
    the image preprocessing step: images stay torch tensors instead of being
    converted to PIL, while the tokenizer is still used for the text side.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Build a gif from the intermediate images saved during generation."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a fixed vector to the image latent and decode."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Initialize the latent vector and optimize it against the CLIP loss."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
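# Usage sketch (illustrative; the vqgan_config/vqgan_checkpoint kwarg names are
# assumptions from the constructor above, and a GPU plus a VQGAN checkpoint are
# required in practice):
#   editor = VQGAN_CLIP(vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt", iterations=20)
#   editor.generate(
#       pos_prompts="a smiling face:1.0 | blue eyes:0.5",
#       image_path="./face.png",
#       show_intermediate=False,
#       save_intermediate=True,
#   )
#   editor.make_animation(output_path="./edit.gif")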
| 74
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 58
| 0
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
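# Example (illustrative): solve("5 6 9 * +".split(" ")) evaluates the postfix
# expression 5 6 9 * +, i.e. 5 + (6 * 9) = 59, printing the push/pop trace
# table along the way.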
| 46
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log noise
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 46
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
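# A minimal sketch of a concrete command built on the ABC above (hypothetical
# command name and wiring; the real subcommands live in transformers/commands/):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from the CLI")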
| 452
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclid's algorithm: returns (x, y) such that a*x + b*y = gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Combine x = r1 (mod n1) and x = r2 (mod n2) for coprime n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same as chinese_remainder_theorem, but using modular inverses directly.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 510
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 498
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of char in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Finds the index in the text of the rightmost mismatched character when
        the pattern is aligned at current_pos, or -1 if the window matches.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
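# For the driver above: text "ABAABA" contains pattern "AB" at indices 0 and 3,
# so the script prints "Pattern found in following positions: " and [0, 3].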
| 498
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
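# Illustrative sanity check (the values are the defaults above, matching the
# deberta-v2-xlarge geometry):
#   config = DebertaV2Config()
#   assert (config.hidden_size, config.num_hidden_layers) == (1536, 24)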
| 234
|
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Returns the 1-indexed line number of the base/exponent pair in data_file
    with the greatest numerical value, comparing via x * log10(a) instead of
    computing a**x directly.
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result


if __name__ == "__main__":
    print(solution())
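# Why logarithms: comparing a**x directly would build astronomically large
# integers, but a**x > b**y exactly when x*log10(a) > y*log10(b). For example
# 3**7 = 2187 beats 2**11 = 2048, and indeed 7*log10(3) ~ 3.34 > 11*log10(2) ~ 3.31.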
| 41
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
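A short sketch of what the attribute_map and the two properties buy you: generic code can read the transformer-standard attribute names off any Pegasus config. Run from user code (the import assumes an installed transformers package), not inside the library itself.

from transformers import PegasusConfig

config = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == config.d_model == 512
assert config.num_attention_heads == config.encoder_attention_heads == 8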
| 717
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 106
| 0
|
"""
Radix (LSD bucket) sort for non-negative integers, digit by digit.
"""
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort(list(range(15))) == sorted(range(15))
    True
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
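To make the bucket-splitting step concrete, here is one hand-run pass over the ones digit (a standalone sketch, independent of the function above):

values = [170, 45, 75, 90, 2, 802, 24, 66]

ones_buckets = [[] for _ in range(10)]
for v in values:
    ones_buckets[v % 10].append(v)  # placement == 1, so int((v / 1) % 10) == v % 10

assert ones_buckets[0] == [170, 90] and ones_buckets[2] == [2, 802]
assert [v for b in ones_buckets for v in b] == [170, 90, 2, 802, 24, 45, 75, 66]
# Ordered by ones digit only; later passes over tens and hundreds finish the
# sort because elements with equal digits keep their relative order (stability).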
| 432
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = Session().describe_training_job(estimator.latest_training_job.name).get(
            "TrainingTimeInSeconds", 999999
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 432
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 54
|
def apply_table(inp, table):
    """Permute the characters of ``inp`` according to the 1-indexed ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 54
| 1
|
"""
Heap's algorithm: generate all permutations of a list by repeated swapping.
"""


def heaps(arr: list) -> list:
    """
    Return all permutations of ``arr``, each as a tuple.
    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
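A quick correctness sketch against the standard library (assumes the heaps function above is in scope):

from itertools import permutations

result = heaps([1, 2, 3])
assert len(result) == 6  # 3! permutations, each produced exactly once
assert set(result) == set(permutations([1, 2, 3]))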
| 467
|
"""Check whether a non-negative integer is a power of two via a bit trick."""


def is_power_of_two(number: int) -> bool:
    """
    Return True if ``number`` has at most one set bit.
    Note: the bit trick also returns True for 0, which is not a power of two.
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
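Why the trick works: a power of two has exactly one set bit, and subtracting 1 turns that bit off while setting all lower bits, so the AND is zero only in that case. A standalone worked example:

#   8 = 0b1000,  7 = 0b0111  ->  8 & 7 == 0           (power of two)
#   6 = 0b0110,  5 = 0b0101  ->  6 & 5 == 0b0100 != 0 (not a power of two)
for n, expected in [(1, True), (2, True), (8, True), (6, False), (12, False)]:
    assert (n & (n - 1) == 0) == expected
assert (0 & (0 - 1)) == 0  # the 0 edge case noted above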
| 451
| 0
|
"""
Build a quantum full adder (two input bits plus a carry-in) and return the
measurement counts from the Aer simulator.
"""
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """
    Adds ``input_1 + input_2 + carry_in`` on a 4-qubit circuit; an input of 2
    is mapped to a Hadamard gate (a superposition of 0 and 1).
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 333
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_stages
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = out_features
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : str = num_stages
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Dict:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCAmelCase ( self ) ->List[Any]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : int = UperNetModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Optional[int]:
return
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = _config_zero_init(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
SCREAMING_SNAKE_CASE : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Any = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 333
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self : int ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Any=1_3 ,__SCREAMING_SNAKE_CASE : str=7 ,__SCREAMING_SNAKE_CASE : List[str]=True ,__SCREAMING_SNAKE_CASE : Union[str, Any]=True ,__SCREAMING_SNAKE_CASE : Dict=True ,__SCREAMING_SNAKE_CASE : Union[str, Any]=9_9 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=3_2 ,__SCREAMING_SNAKE_CASE : Tuple=5 ,__SCREAMING_SNAKE_CASE : List[str]=4 ,__SCREAMING_SNAKE_CASE : List[str]=3_7 ,__SCREAMING_SNAKE_CASE : int="gelu" ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[Any]=0.1 ,__SCREAMING_SNAKE_CASE : List[Any]=5_1_2 ,__SCREAMING_SNAKE_CASE : Tuple=1_6 ,__SCREAMING_SNAKE_CASE : Optional[Any]=2 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.02 ,__SCREAMING_SNAKE_CASE : Tuple=3 ,__SCREAMING_SNAKE_CASE : Tuple=4 ,__SCREAMING_SNAKE_CASE : Dict=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCAmelCase ( self : Tuple ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,*__SCREAMING_SNAKE_CASE : List[Any] ):
UpperCAmelCase = OpenAIGPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,head_mask=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : List[str] ,*__SCREAMING_SNAKE_CASE : Optional[int] ):
UpperCAmelCase = OpenAIGPTLMHeadModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self : Tuple ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Dict ,*__SCREAMING_SNAKE_CASE : Tuple ):
UpperCAmelCase = OpenAIGPTDoubleHeadsModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : Dict ,*__SCREAMING_SNAKE_CASE : Optional[Any] ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = OpenAIGPTForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _UpperCAmelCase ( self : Tuple ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Tuple=False ):
UpperCAmelCase = super()._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE ,)
UpperCAmelCase = inputs_dict["labels"]
UpperCAmelCase = inputs_dict["labels"]
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE ,)
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase = OpenAIGPTModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,n_embd=3_7 )
def _UpperCAmelCase ( self : List[Any] ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : int ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : int ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = OpenAIGPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def _UpperCAmelCase ( self : List[Any] ):
UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE ) # the president is
UpperCAmelCase = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase = model.generate(__SCREAMING_SNAKE_CASE ,do_sample=__SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].tolist() ,__SCREAMING_SNAKE_CASE )
| 333
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
def __init__( self : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str=1_3 ,__SCREAMING_SNAKE_CASE : Optional[Any]=7 ,__SCREAMING_SNAKE_CASE : Optional[Any]=True ,__SCREAMING_SNAKE_CASE : List[str]=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : Tuple=9_9 ,__SCREAMING_SNAKE_CASE : str=3_2 ,__SCREAMING_SNAKE_CASE : Any=2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=4 ,__SCREAMING_SNAKE_CASE : Tuple=3_7 ,__SCREAMING_SNAKE_CASE : List[str]="gelu" ,__SCREAMING_SNAKE_CASE : List[Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,__SCREAMING_SNAKE_CASE : Tuple=5_1_2 ,__SCREAMING_SNAKE_CASE : Dict=1_6 ,__SCREAMING_SNAKE_CASE : Tuple=2 ,__SCREAMING_SNAKE_CASE : List[str]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[Any]=3 ,__SCREAMING_SNAKE_CASE : Dict=4 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=None ,__SCREAMING_SNAKE_CASE : Dict=1_0_0_0 ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = range_bbox
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase = bbox[i, j, 3]
UpperCAmelCase = bbox[i, j, 1]
UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase = bbox[i, j, 2]
UpperCAmelCase = bbox[i, j, 0]
UpperCAmelCase = t
UpperCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = LayoutLMConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ):
UpperCAmelCase = TFLayoutLMModel(config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : List[Any] ):
UpperCAmelCase = TFLayoutLMForMaskedLM(config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFLayoutLMForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self : List[str] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFLayoutLMForTokenClassification(config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : str ):
UpperCAmelCase = TFLayoutLMForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = TFLayoutLMModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,hidden_size=3_7 )
def _UpperCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : List[str] ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFLayoutLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def _UpperCAmelCase ( self : List[str] ):
pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2: sequence length 25
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 333
| 1
|
"""
Project Euler Problem 77: https://projecteuler.net/problem=77
Find the first value which can be written as the sum of primes in over
five thousand different ways.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products, one per prime partition of number_to_partition.
    By unique factorization, distinct multisets of primes give distinct
    products, so the size of the returned set counts the prime partitions.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than number_unique_partitions prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 517
|
"""Helpers to measure time, CPU RAM and GPU memory around a workload."""
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
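A minimal usage sketch of the three helpers above. The matmul workload is an arbitrary placeholder, not part of the original file, and a CUDA-enabled PyTorch build is assumed since the helpers reset CUDA peak-memory stats.

start = start_measure()
x = torch.randn(1024, 1024)
y = x @ x  # placeholder workload to profile
measures = end_measure(start)
log_measures(measures, "matmul benchmark")
# Prints elapsed time plus per-GPU and CPU RAM deltas and peaks, in MiB.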
| 517
| 1
|
"""Algorithm for pigeonhole sorting."""


def pigeonhole_sort(a):
    """Sort the list ``a`` in place; all elements must be integers."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
| 296
|
"""Queue implementation using two stacks."""

from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Direct references to the bound methods avoid repeated attribute
        # look-ups inside the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
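# A minimal usage sketch (the values are arbitrary). Enqueues push onto
# _stack1 and dequeues drain _stack2, so each element is moved at most once,
# giving amortized O(1) per operation:
#
#   queue = QueueByTwoStacks([1, 2, 3])
#   queue.put(4)
#   print(queue.get())  # 1
#   print(len(queue))   # 3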
| 296
| 1
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                if line.get("outcome", "") == "failed":
                    section_num_failed += 1
                    failed.append([test, duration, log.name.split("_")[0]])
                    total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    test_failures[i][0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
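# For reference, each *.log file parsed above is expected to contain one JSON
# object per line in roughly this shape (a hypothetical example matching the
# "nodeid"/"duration"/"outcome" keys the loop reads; the exact schema depends
# on how pytest was configured to emit report logs):
#
#   {"nodeid": "tests/test_big_modeling.py::test_dispatch", "duration": 0.1234, "outcome": "failed"}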
| 14
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
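# Example invocation (a sketch: the script filename and all paths below are
# placeholders, not real files):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec2_conformer_rope.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt
#
# Because "rope" appears in the checkpoint path, the config switches to rotary
# position embeddings before the fairseq weights are mapped over.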
| 14
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
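# The pattern above defers heavy imports: at runtime the module object in
# sys.modules is replaced by a _LazyModule that only imports a submodule the
# first time one of its attributes is accessed, while the TYPE_CHECKING branch
# keeps static analyzers happy. A sketch of the effect (assuming transformers
# is installed with sentencepiece support):
#
#   from transformers.models.layoutxlm import LayoutXLMProcessor
#   # only at this point is processing_layoutxlm actually imported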
| 585
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights onto our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
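# Example invocation (the output path is a placeholder; the checkpoint URL is
# the script's own default, pointing at the base DiT model):
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth" \
#       --pytorch_dump_folder_path ./dit-base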
| 308
| 0
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
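# Usage sketch for the auto classes defined above (the checkpoint name is just
# an illustrative public model, not something this module prescribes):
#
#   from transformers import AutoTokenizer, FlaxAutoModel
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   outputs = model(**tokenizer("Hello world", return_tensors="np"))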
| 704
|
"""simple docstring"""
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
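# A minimal interactive loop built on get_character() (a sketch; this must run
# in a real terminal, since raw mode is required on POSIX):
#
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break
#       if isinstance(key, str) and ord(key) == KEYMAP["up"]:
#           print("arrow up")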
| 168
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a [`TimeSeriesTransformerModel`].
    """

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
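# Instantiation sketch (the values are arbitrary illustrations):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
#
# context_length falls back to prediction_length when omitted, and feature_size
# is derived from input_size, lags_sequence, and the embedding, time, and
# real-valued feature counts via _number_of_features.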
| 480
|
"""simple docstring"""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """
    >>> boards: list[list[str]] = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """

    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
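# Quick sanity check of the diagonal bookkeeping: queens at (row=1, col=3) and
# (row=3, col=1) share the 135º diagonal because row + col == 4 for both, while
# (row=0, col=0) and (row=2, col=2) share the 45º diagonal with row - col == 0.
# Storing those sums and differences is what lets the search reject a candidate
# column in O(1) instead of rescanning all previously placed queens.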
| 480
| 1
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
def _A ( self : Union[str, Any] , __lowerCamelCase : List[Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(__lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCamelCase :List[Any] = timeit.repeat(
__lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(__lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def _A ( self : Dict , __lowerCamelCase : Callable[[], None] ):
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
UpperCamelCase :int = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
UpperCamelCase :Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase :int = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase :int = nvml.nvmlDeviceGetMemoryInfo(__lowerCamelCase )
UpperCamelCase :List[str] = meminfo.used
UpperCamelCase :Optional[int] = Memory(__lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
UpperCamelCase :Optional[int] = None
else:
UpperCamelCase :Any = measure_peak_memory_cpu(__lowerCamelCase )
UpperCamelCase :str = Memory(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase :Optional[int] = stop_memory_tracing(__lowerCamelCase )
if memory is None:
UpperCamelCase :List[Any] = summary.total
else:
UpperCamelCase :Dict = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
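# --- illustrative sketch (hypothetical module layout, not from the original file) ---
# `_LazyModule` defers the heavy torch/tf imports above until an attribute is
# actually accessed. The same effect can be approximated with module-level
# `__getattr__` (PEP 562); `configuration_xlm`/`XLMConfig` below stand in for
# any submodule and attribute:
import importlib

_LAZY_ATTRS = {"XLMConfig": "configuration_xlm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(f".{_LAZY_ATTRS[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")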
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" ,[
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowercase ,i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" ,[
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] ,)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" ,[
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] ,)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
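# --- quick interactive check (uses the same `datasets` internals as the tests above) ---
# `_distribute_shards` slices `num_shards` into at most `max_num_jobs` contiguous
# ranges, front-loading the remainder, e.g. 10 shards over 3 jobs -> sizes 4/3/3:
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # expected: [range(0, 4), range(4, 7), range(7, 10)]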
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
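# --- minimal sketch of a concrete command (hypothetical, for illustration only) ---
# Subclasses hook themselves into the CLI by registering their arguments and
# deferring the actual work to `run`:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--name", type=str, default="world")
        parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"hello, {self._name}")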
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
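# --- note on the output (illustrative; the exact tokens depend on the checkpoint) ---
# `fill_mask` returns a list of `topk` tuples of the form
#   (filled_sentence, probability, predicted_token)
# so the call above prints something like
#   [("Le camembert est délicieux :)", 0.49, "délicieux"), ...]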
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
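# --- classical cross-check (illustrative, not in the original script) ---
# For basis-state inputs (0 or 1) the circuit computes sum = a XOR b XOR c_in and
# carry_out = majority(a, b, c_in); a plain-Python reference for comparison:
def classical_full_adder(a: int, b: int, c_in: int) -> tuple:
    total = a + b + c_in
    return total % 2, total // 2  # (sum, carry_out)

# classical_full_adder(1, 1, 1) == (1, 1), matching the dominant '11' measurement above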
def stooge_sort(arr):
    """Sort a list in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
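# --- quick sanity check (illustrative) ---
# Stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71), so it is a teaching
# example only; a tiny round-trip test of the implementation above:
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []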
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = (DDPMScheduler,)
def get_scheduler_config(self, **kwargs):
    config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
    config.update(**kwargs)
return config
def __lowerCAmelCase ( self ) ->int:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def __lowerCAmelCase ( self ) ->int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : int = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE : str = pred_prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Dict = scheduler_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
SCREAMING_SNAKE_CASE : int = -1
else:
SCREAMING_SNAKE_CASE : List[Any] = timesteps[i + 1]
SCREAMING_SNAKE_CASE : int = scheduler.previous_timestep(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def test_custom_timesteps_too_large(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [scheduler.config.num_train_timesteps]

    with self.assertRaises(
        ValueError,
        msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
    ):
        scheduler.set_timesteps(timesteps=timesteps)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import operator as op

SCALER_NAME = 'scaler.pt'
MODEL_NAME = 'pytorch_model'
RNG_STATE_NAME = 'random_states'
OPTIMIZER_NAME = 'optimizer'
SCHEDULER_NAME = 'scheduler'
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
SAGEMAKER_PYTORCH_VERSION = '1.10.2'
SAGEMAKER_PYTHON_VERSION = 'py38'
SAGEMAKER_TRANSFORMERS_VERSION = '4.17.0'
SAGEMAKER_PARALLEL_EC2_INSTANCES = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
FSDP_SHARDING_STRATEGY = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
FSDP_AUTO_WRAP_POLICY = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
FSDP_BACKWARD_PREFETCH = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
FSDP_STATE_DICT_TYPE = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
FSDP_PYTORCH_VERSION = '2.0.1'
DEEPSPEED_MULTINODE_LAUNCHERS = ['pdsh', 'standard', 'openmpi', 'mvapich']
TORCH_DYNAMO_MODES = ['default', 'reduce-overhead', 'max-autotune']
STR_OPERATION_TO_FUNC = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]

CUDA_DISTRIBUTED_TYPES = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
# identifier below reconstructed from context; the original name was lost in the dump
XPU_DISTRIBUTED_TYPES = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
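# --- illustrative use of the string-to-operator map above (self-contained sketch) ---
# Mapping comparison strings to `operator` functions lets version requirements be
# expressed as data and evaluated uniformly; `compare_versions` is a hypothetical
# helper name, not part of the original file:
from packaging import version

def compare_versions(current: str, operation: str, required: str) -> bool:
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(required))

# compare_versions('2.1.0', '>=', '2.0.1') -> True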
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_source_positions=6_000, max_target_positions=1_024,
        num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1_024,
        input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
            model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace('encoder.model', 'encoder')
    if "decoder.model" in name:
        name = name.replace('decoder.model', 'decoder')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if name.startswith('encoder'):
        if "layers" in name:
            name = 'encoder.' + name
        if "attn.proj" in name:
            name = name.replace('attn.proj', 'attention.output.dense')
        if "attn" in name and "mask" not in name:
            name = name.replace('attn', 'attention.self')
        if "norm1" in name:
            name = name.replace('norm1', 'layernorm_before')
        if "norm2" in name:
            name = name.replace('norm2', 'layernorm_after')
        if "mlp.fc1" in name:
            name = name.replace('mlp.fc1', 'intermediate.dense')
        if "mlp.fc2" in name:
            name = name.replace('mlp.fc2', 'output.dense')
        if name == "encoder.norm.weight":
            name = 'encoder.layernorm.weight'
        if name == "encoder.norm.bias":
            name = 'encoder.layernorm.bias'
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset('hf-internal-testing/example-documents')
    image = dataset['test'][0]['image'].convert('RGB')

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='pt').pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        question = 'When is the coffee break?'
        task_prompt = task_prompt.replace('{user_input}', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '<s_rvlcdip>'
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '<s_cord>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = 's_cord-v2>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '<s_zhtrainticket>'
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = 'hello world'
    else:
        raise ValueError('Model name not supported')
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='pt')[
        'input_ids'
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
        processor.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
lowercase_ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase__ : Optional[Any] = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = None
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''help message'''} )
lowerCAmelCase = None
lowerCAmelCase = list_field(default=[] )
lowerCAmelCase = list_field(default=[] )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = list_field(default=[] )
lowerCAmelCase = list_field(default=[1, 2, 3] )
lowerCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = field()
lowerCAmelCase = None
lowerCAmelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
lowerCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = None
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = None
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''help message'''} )
lowerCAmelCase = None
lowerCAmelCase = list_field(default=[] )
lowerCAmelCase = list_field(default=[] )
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
__A : List[Any] = {k: v for k, v in vars(_UpperCAmelCase).items() if k != 'container'}
__A : Union[str, Any] = {k: v for k, v in vars(_UpperCAmelCase).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , _UpperCAmelCase) and yy.get('choices' , _UpperCAmelCase):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](_UpperCAmelCase) , yy['type'](_UpperCAmelCase))
del xx["type"], yy["type"]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
__A : int = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__A) ,) : Any = parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase)
self.assertFalse(example.flag)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = HfArgumentParser(_UpperCAmelCase)
__A : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase)
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message')
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz')
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase)
__A : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase)
for dataclass_type in dataclass_types:
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : str = parser.parse_args([])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Tuple = parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Optional[Any] = parser.parse_args(['--foo', '--baz'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Optional[int] = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : List[Any] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
__A : Tuple = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = parser.parse_args([])
self.assertEqual(args.foo , 'toto')
__A : Optional[Any] = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
__A : Union[str, Any] = parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
__A : List[Any] = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
__A : Dict = parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
__A : Tuple = parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = "toto"
__A : str = HfArgumentParser(_UpperCAmelCase)
__A : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = parser.parse_args([])
self.assertEqual(args.foo , 'toto')
__A : Optional[int] = parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
__A : Optional[Any] = parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = parser.parse_args([])
self.assertEqual(
_UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
__A : Optional[int] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase)
expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message')
expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase)
expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase)
expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase)
__A : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase)
for dataclass_type in dataclass_types:
__A : Dict = HfArgumentParser(_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = parser.parse_args([])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[]))
__A : str = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = HfArgumentParser(_UpperCAmelCase)
__A : Tuple = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase)
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : List[str] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__A : str = parser.parse_dict(_UpperCAmelCase)[0]
__A : Optional[Any] = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = HfArgumentParser(_UpperCAmelCase)
__A : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = HfArgumentParser(_UpperCAmelCase)
__A : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[Any] = os.path.join(_UpperCAmelCase , 'temp_json')
os.mkdir(_UpperCAmelCase)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
__A : str = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[str] = os.path.join(_UpperCAmelCase , 'temp_yaml')
os.mkdir(_UpperCAmelCase)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(_UpperCAmelCase , _UpperCAmelCase)
__A : str = parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
__A : Optional[Any] = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'module.blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'module.blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""target_encoder"""]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="""pt""")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(F'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
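# --- illustrative use of the exports above (run from client code, not this file) ---
# from datasets.utils import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
# disable_progress_bar()
# assert not is_progress_bar_enabled()
# enable_progress_bar()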
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
space_files = [file for file in filepaths if ''' ''' in file]
if space_files:
print(F'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
A_ = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(F'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
A_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
A_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 498
| 1
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__snake_case : Optional[Any] = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Tuple =UNetaDModel
_lowerCamelCase : Any ="""sample"""
@property
def A__ ( self : Optional[Any] ):
A__ = 4
A__ = 3
A__ = (3_2, 3_2)
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
A__ = torch.tensor([1_0] ).to(_lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self : Any ):
return (3, 3_2, 3_2)
@property
def A__ ( self : List[str] ):
return (3, 3_2, 3_2)
def A__ ( self : List[str] ):
A__ = {
'''block_out_channels''': (3_2, 6_4),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 3_2,
}
A__ = self.dummy_input
return init_dict, inputs_dict
class UpperCamelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[str] =UNetaDModel
_lowerCamelCase : Tuple ="""sample"""
@property
def A__ ( self : Dict ):
A__ = 4
A__ = 4
A__ = (3_2, 3_2)
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
A__ = torch.tensor([1_0] ).to(_lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self : Dict ):
return (4, 3_2, 3_2)
@property
def A__ ( self : Any ):
return (4, 3_2, 3_2)
def A__ ( self : Union[str, Any] ):
A__ = {
'''sample_size''': 3_2,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (3_2, 6_4),
'''attention_head_dim''': 3_2,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
A__ = self.dummy_input
return init_dict, inputs_dict
def A__ ( self : int ):
A__ , A__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_lowerCamelCase )
A__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def A__ ( self : List[Any] ):
A__ , A__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_lowerCamelCase )
model.to(_lowerCamelCase )
A__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def A__ ( self : Optional[Any] ):
A__ , A__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_lowerCamelCase )
model_accelerate.to(_lowerCamelCase )
model_accelerate.eval()
A__ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
A__ = noise.to(_lowerCamelCase )
A__ = torch.tensor([1_0] * noise.shape[0] ).to(_lowerCamelCase )
A__ = model_accelerate(_lowerCamelCase , _lowerCamelCase )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
A__ , A__ = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_lowerCamelCase , low_cpu_mem_usage=_lowerCamelCase )
model_normal_load.to(_lowerCamelCase )
model_normal_load.eval()
A__ = model_normal_load(_lowerCamelCase , _lowerCamelCase )['''sample''']
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1E-3 )
def A__ ( self : str ):
A__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_lowerCamelCase )
A__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
A__ = noise.to(_lowerCamelCase )
A__ = torch.tensor([1_0] * noise.shape[0] ).to(_lowerCamelCase )
with torch.no_grad():
A__ = model(_lowerCamelCase , _lowerCamelCase ).sample
A__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A__ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1E-3 ) )
class UpperCamelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Tuple =UNetaDModel
_lowerCamelCase : Any ="""sample"""
@property
def A__ ( self : List[str] , _lowerCamelCase : Any=(3_2, 3_2) ):
A__ = 4
A__ = 3
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
A__ = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=_lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self : Any ):
return (3, 3_2, 3_2)
@property
def A__ ( self : Optional[int] ):
return (3, 3_2, 3_2)
def A__ ( self : List[Any] ):
A__ = {
'''block_out_channels''': [3_2, 6_4, 6_4, 6_4],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
A__ = self.dummy_input
return init_dict, inputs_dict
@slow
def A__ ( self : List[Any] ):
A__ , A__ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_lowerCamelCase )
A__ = self.dummy_input
A__ = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(_lowerCamelCase )
A__ = noise
A__ = model(**_lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def A__ ( self : int ):
A__ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_lowerCamelCase )
A__ = 4
A__ = 3
A__ = (2_5_6, 2_5_6)
A__ = torch.ones((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
A__ = torch.tensor(batch_size * [1E-4] ).to(_lowerCamelCase )
with torch.no_grad():
A__ = model(_lowerCamelCase , _lowerCamelCase ).sample
A__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A__ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1E-2 ) )
def A__ ( self : Optional[Any] ):
A__ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_lowerCamelCase )
A__ = 4
A__ = 3
A__ = (3_2, 3_2)
A__ = torch.ones((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
A__ = torch.tensor(batch_size * [1E-4] ).to(_lowerCamelCase )
with torch.no_grad():
A__ = model(_lowerCamelCase , _lowerCamelCase ).sample
A__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A__ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1E-2 ) )
def A__ ( self : Union[str, Any] ):
pass
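# Illustrative sketch, not part of the original tests: building the same tiny
# configuration the first tester uses and running one denoising forward pass.
# This assumes the obfuscated UNetaDModel alias refers to diffusers' UNet2DModel.
def _example_unet2d_forward():
    import torch
    from diffusers import UNet2DModel
    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        block_out_channels=(32, 64),
        attention_head_dim=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    noise = torch.randn(4, 3, 32, 32)  # (batch, channels, height, width)
    timestep = torch.tensor([10])      # diffusion timestep
    return model(noise, timestep).sample  # same shape as the input noise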
| 571
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
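# Illustrative usage, not part of the original module, assuming the console
# entry point is wired to this main() in the package metadata:
#   $ diffusers-cli env    # dispatches to EnvironmentCommand and prints env info
#   $ diffusers-cli        # no subcommand -> prints help and exits with code 1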
| 402
| 0
|
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
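# Quick sanity check (illustrative, not in the original): the rolling-row DP
# above agrees with the standard library's closed-form binomial.
import math
assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252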
| 719
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = BarthezTokenizer
_UpperCamelCase : List[str] = BarthezTokenizerFast
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Tuple = True
def __a ( self ):
super().setUp()
_lowercase : int = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer
def __a ( self ):
_lowercase : Dict = '<pad>'
_lowercase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_lowerCAmelCase ) , 1_0_1_1_2_2 )
def __a ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
def __a ( self ):
_lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : Optional[Any] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
_lowercase : Any = self.tokenizer(
_lowerCAmelCase , max_length=len(_lowerCAmelCase ) , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='pt' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_lowercase : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
if not self.test_rust_tokenizer:
return
_lowercase : int = self.get_tokenizer()
_lowercase : Tuple = self.get_rust_tokenizer()
_lowercase : List[str] = 'I was born in 92000, and this is falsé.'
_lowercase : str = tokenizer.tokenize(_lowerCAmelCase )
_lowercase : Optional[int] = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
_lowercase : Tuple = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = self.get_rust_tokenizer()
_lowercase : Optional[int] = tokenizer.encode(_lowerCAmelCase )
_lowercase : int = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __a ( self ):
# fmt: off
_lowercase : List[str] = {'input_ids': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_lowercase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_lowerCAmelCase , )
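# Illustrative sketch, not part of the original tests: a plain encode/decode
# round trip through the checkpoint exercised above (name taken from the test).
def _example_barthez_roundtrip():
    tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
    ids = tokenizer.encode("Le transformeur est un modèle d'apprentissage profond.")
    return tokenizer.decode(ids, skip_special_tokens=True)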
| 66
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
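# Illustrative check, not part of the original: the same transform on a
# synthetic image, so no file on disk is needed.
def _example_brightness_on_gray_square():
    gray = Image.new("L", (4, 4), color=128)       # uniform mid-gray image
    brighter = change_brightness(gray, 50)
    assert list(brighter.getdata()) == [178] * 16  # 128 + 50 for every pixel
    return brighter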
| 66
| 1
|
from ...configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE : Optional[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig( PretrainedConfig ):
    model_type = """tapas"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , type_vocab_sizes=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
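# Illustrative sketch, not part of the original file: instantiating the config
# with a few of the fine-tuning hyperparameters defined above. The values are
# only an example (WTQ-style weak supervision), not defaults.
def _example_tapas_config():
    config = TapasConfig(
        num_aggregation_labels=4,
        use_answer_as_supervision=True,
        answer_loss_cutoff=0.664694,
        cell_selection_preference=0.207951,
    )
    return config.num_aggregation_labels, config.select_one_column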
| 712
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word) -> None:
        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
def print_words(node, word) -> None:
    """simple docstring"""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """simple docstring"""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg, passes) -> None:
    """simple docstring"""
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """simple docstring"""
    assert test_trie()
def main() -> None:
    """simple docstring"""
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
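# Illustrative sketch, not part of the original: using the trie for a simple
# prefix lookup, collecting completions instead of printing them.
def example_completions(root: TrieNode, prefix: str) -> list:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    completions = []
    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            completions.append(word)
        for key, value in node.nodes.items():
            _collect(value, word + key)
    _collect(curr, prefix)
    return completions
# e.g. after root.insert_many("band bandana banana".split()),
# example_completions(root, "band") returns ["band", "bandana"].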
| 525
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : List[Any] ,A_ : Optional[int]=14 ,A_ : int=7 ,A_ : Dict=True ,A_ : str=True ,A_ : List[str]=False ,A_ : Optional[Any]=True ,A_ : int=99 ,A_ : List[str]=32 ,A_ : Optional[Any]=4 ,A_ : int=4 ,A_ : int=4 ,A_ : Dict=37 ,A_ : Tuple="gelu" ,A_ : Tuple=0.1 ,A_ : Union[str, Any]=0.1 ,A_ : Dict=512 ,A_ : Optional[int]=0.02 ,) -> Union[str, Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = rotary_dim
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = initializer_range
A = None
A = vocab_size - 1
A = vocab_size - 1
A = vocab_size - 1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = GPTJConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,use_cache=A_ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,rotary_dim=self.rotary_dim ,)
return (config, input_ids, input_mask)
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ) -> List[Any]:
A = 20
A = model_class_name(A_ )
A = model.init_cache(input_ids.shape[0] ,A_ )
A = jnp.ones((input_ids.shape[0], max_decoder_length) ,dtype='i4' )
A = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) )
A = model(
input_ids[:, :-1] ,attention_mask=A_ ,past_key_values=A_ ,position_ids=A_ ,)
A = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype='i4' )
A = model(
input_ids[:, -1:] ,attention_mask=A_ ,past_key_values=outputs_cache.past_key_values ,position_ids=A_ ,)
A = model(A_ )
A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F'Max diff is {diff}' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : Union[str, Any] ,A_ : int ,A_ : Optional[Any] ) -> Optional[int]:
A = 20
A = model_class_name(A_ )
A = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,axis=-1 ,)
A = model.init_cache(input_ids.shape[0] ,A_ )
A = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) )
A = model(
input_ids[:, :-1] ,attention_mask=A_ ,past_key_values=A_ ,position_ids=A_ ,)
A = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype='i4' )
A = model(
input_ids[:, -1:] ,past_key_values=outputs_cache.past_key_values ,attention_mask=A_ ,position_ids=A_ ,)
A = model(A_ ,attention_mask=A_ )
A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F'Max diff is {diff}' )
@require_flax
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_lowerCamelCase: Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = FlaxGPTJModelTester(self )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
A , A , A = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
for model_class_name in self.all_model_classes:
A , A , A = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
A_ ,A_ ,A_ ,A_ )
@tooslow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = GPTaTokenizer.from_pretrained('gpt2' ,pad_token='<|endoftext|>' ,padding_side='left' )
A = tokenizer(['Hello this is a long string', 'Hey'] ,return_tensors='np' ,padding=A_ ,truncation=A_ )
A = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
A = False
A = model.config.eos_token_id
A = jax.jit(model.generate )
A = jit_generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,pad_token_id=tokenizer.pad_token_id ).sequences
A = tokenizer.batch_decode(A_ ,skip_special_tokens=A_ )
A = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(A_ ,A_ )
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A = self._prepare_for_class(A_ ,A_ )
A = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A = model_class.__name__[4:] # Skip the "Flax" at the beginning
A = getattr(A_ ,A_ )
A , A = pt_inputs['input_ids'].shape
A = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(A_ ):
A = 0
A = 1
A = 0
A = 1
A = pt_model_class(A_ ).eval()
A = model_class(A_ ,dtype=jnp.floataa )
A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,A_ )
A = fx_state
with torch.no_grad():
A = pt_model(**A_ ).to_tuple()
A = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(A_ ,A_ ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A_ )
A = model_class.from_pretrained(A_ ,from_pt=A_ )
A = fx_model_loaded(**A_ ).to_tuple()
self.assertEqual(
len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(A_ ,A_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A = self._prepare_for_class(A_ ,A_ )
A = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A = model_class.__name__[4:] # Skip the "Flax" at the beginning
A = getattr(A_ ,A_ )
A = pt_model_class(A_ ).eval()
A = model_class(A_ ,dtype=jnp.floataa )
A = load_flax_weights_in_pytorch_model(A_ ,fx_model.params )
A , A = pt_inputs['input_ids'].shape
A = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(A_ ):
A = 0
A = 1
A = 0
A = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A = pt_model(**A_ ).to_tuple()
A = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(A_ ,A_ ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A_ )
A = pt_model_class.from_pretrained(A_ ,from_flax=A_ )
with torch.no_grad():
A = pt_model_loaded(**A_ ).to_tuple()
self.assertEqual(
len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(A_ ,A_ ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(A_ )
| 91
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase :Union[str, Any] = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig( PretrainedConfig):
    """simple docstring"""
    model_type ='''deit'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , )-> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig( OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version =version.parse('''1.11''')
    @property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self )-> float:
        return 1e-4
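# Illustrative sketch, not part of the original file: building the config and
# reading back the ONNX export metadata defined above.
def _example_deit_onnx_metadata():
    config = DeiTConfig(image_size=224, patch_size=16)
    onnx_config = DeiTOnnxConfig(config)
    # inputs -> {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
    return onnx_config.inputs, onnx_config.atol_for_validation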
| 222
| 0
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func , 'handle_key' , [] )
        handle += [key]
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func , 'handle_key' , [] )
        handle += keys
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
class KeyHandler(type):
    """simple docstring"""
    def __new__( cls , name , bases , attrs ):
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls) -> type:
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
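# Illustrative sketch, not part of the original module: a class whose methods
# are bound to key codes through the decorators and metaclass above. The key
# names used with KEYMAP are assumptions about that mapping's contents.
# class Menu(metaclass=KeyHandler):
#     @mark(KEYMAP["up"])
#     def move_up(cls):
#         return "moved up"
#
#     @mark_multiple(KEYMAP["newline"], KEYMAP["tab"])
#     def select(cls):
#         return "selected"
#
# Menu.handle_input() reads one character and dispatches to the matching
# handler, returning None for unbound keys.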
| 707
|
import argparse
import struct
import unittest
class SHA256 :
    """simple docstring"""
    def __init__( self , data : bytes ):
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing( data : bytes ):
        """simple docstring"""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value : int , rotations : int ):
        """simple docstring"""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest ( unittest.TestCase ):
    """simple docstring"""
    def test_match_hashes( self ):
        """simple docstring"""
        import hashlib
        data = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHA256(data ).hash , hashlib.sha256(data ).hexdigest() )
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
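# Illustrative self-check, not part of the original: the pure-Python digest
# above must agree with hashlib for any input.
import hashlib as _hashlib
assert SHA256(b"abc").hash == _hashlib.sha256(b"abc").hexdigest()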
| 52
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73
|
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18)
    else:
        return datetime(year , 3 , 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
snake_case__ : Dict = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
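# Illustrative spot-check, not part of the original script: two well-known
# Gregorian Easter dates.
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2021) == datetime(2021, 4, 4)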
| 23
| 0
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """\n""".join(SCREAMING_SNAKE_CASE__ )
Path(SCREAMING_SNAKE_CASE__ ).open("""w""" ).writelines(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple ='patrickvonplaten/t5-tiny-random'
lowerCAmelCase : str ='sshleifer/bart-tiny-random'
lowerCAmelCase : List[Any] ='sshleifer/tiny-mbart'
lowerCAmelCase : List[str] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> Dict:
lowerCAmelCase : Dict = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowerCAmelCase : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowerCAmelCase : Dict = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(lowercase_ , lowercase_ )
lowerCAmelCase : List[str] = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
lowerCAmelCase : int = """translation_en_to_de""" if model == T5_TINY else """summarization"""
lowerCAmelCase : Union[str, Any] = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_generate()
assert Path(lowercase_ ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self ) -> Union[str, Any]:
self.run_eval_tester(lowercase_ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self , lowercase_ ) -> Optional[int]:
self.run_eval_tester(lowercase_ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self , lowercase_ ) -> Optional[int]:
lowerCAmelCase : List[str] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowerCAmelCase : str = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowerCAmelCase : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
lowerCAmelCase : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() )
lowerCAmelCase : str = str(tmp_dir / """scores.json""" )
lowerCAmelCase : List[str] = str(tmp_dir / """val.target""" )
_dump_articles(lowercase_ , text["""en"""] )
_dump_articles(lowercase_ , text["""de"""] )
lowerCAmelCase : Any = """translation_en_to_de""" if model == T5_TINY else """summarization"""
lowerCAmelCase : Any = f"""
run_eval_search.py
{model}
{str(lowercase_ )}
{str(lowercase_ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(lowercase_ , """argv""" , lowercase_ ):
with CaptureStdout() as cs:
run_search()
lowerCAmelCase : Union[str, Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
lowerCAmelCase : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(lowercase_ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowercase_ ).exists()
os.remove(Path(lowercase_ ) )
| 693
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693
| 1
|
def pancake_sort(arr) -> list:
    """simple docstring"""
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
__A = input("Enter numbers separated by a comma:\n").strip()
__A = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
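# Illustrative check, not part of the original: pancake sort agrees with
# sorted() on a sample input (it sorts using only prefix reversals).
assert pancake_sort([3, 1, 5, 2, 4]) == sorted([3, 1, 5, 2, 4])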
| 59
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
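# Illustrative sketch, not part of the original file: the deferred-import idea
# that _LazyModule automates, expressed with PEP 562 module-level __getattr__.
# The body below is a simplified stand-in, not the transformers implementation.
# def __getattr__(name):
#     if name == "BloomTokenizerFast":
#         from .tokenization_bloom_fast import BloomTokenizerFast
#         return BloomTokenizerFast
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")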
| 121
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
| 52
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return (preds == labels).mean()
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
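# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). The task name and
# paths below are placeholders for a SWAG-style setup; adjust them to your
# environment before running:
#
#   python run_multiple_choice.py \
#     --model_name_or_path bert-base-uncased \
#     --task_name swag \
#     --data_dir ./data/swag \
#     --output_dir ./mc_output \
#     --max_seq_length 128 \
#     --do_train --do_eval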
| 52
| 1
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
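# Hedged usage sketch (not part of the original module; it relies on the fixed
# class names above and on `transformers` being importable for PretrainedConfig):
#
#   text_cfg = AlignTextConfig(hidden_size=768, num_hidden_layers=12)
#   vision_cfg = AlignVisionConfig(image_size=600)
#   cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.to_dict()["text_config"]["hidden_size"] == 768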
| 129
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Removes duplicate entries from the model toc and sorts models alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
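# Hedged usage note (assumes this checker lives at `utils/check_doc_toc.py` in
# a transformers checkout, where the relative toc path above resolves):
#
#   python utils/check_doc_toc.py                      # fail if the toc is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toc in place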
| 9
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
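# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). Assumes CoNLL-style
# .txt files under --data_dir and a local `tasks.py` defining an `NER`
# TokenClassificationTask subclass; all paths are placeholders:
#
#   python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --task_type NER \
#     --data_dir ./data/conll2003 \
#     --output_dir ./ner_output \
#     --do_train --do_eval --do_predict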
| 269
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
def __init__( self , UpperCAmelCase_=3 , UpperCAmelCase_=3 , UpperCAmelCase_=("DownEncoderBlock2D",) , UpperCAmelCase_=(64,) , UpperCAmelCase_=2 , UpperCAmelCase_=32 , UpperCAmelCase_="silu" , UpperCAmelCase_=True , ):
super().__init__()
lowerCamelCase =layers_per_block
lowerCamelCase =torch.nn.Convad(
UpperCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase =None
lowerCamelCase =nn.ModuleList([] )
# down
lowerCamelCase =block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase_ ):
lowerCamelCase =output_channel
lowerCamelCase =block_out_channels[i]
lowerCamelCase =i == len(UpperCAmelCase_ ) - 1
lowerCamelCase =get_down_block(
UpperCAmelCase_ , num_layers=self.layers_per_block , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
self.down_blocks.append(UpperCAmelCase_ )
# mid
lowerCamelCase =UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# out
lowerCamelCase =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase_ , eps=1E-6 )
lowerCamelCase =nn.SiLU()
lowerCamelCase =2 * out_channels if double_z else out_channels
lowerCamelCase =nn.Convad(block_out_channels[-1] , UpperCAmelCase_ , 3 , padding=1 )
lowerCamelCase =False
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =x
lowerCamelCase =self.conv_in(UpperCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ ):
def custom_forward(*UpperCAmelCase_ ):
return module(*UpperCAmelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
lowerCamelCase =torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
# middle
lowerCamelCase =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
for down_block in self.down_blocks:
lowerCamelCase =torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ )
# middle
lowerCamelCase =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
lowerCamelCase =down_block(UpperCAmelCase_ )
# middle
lowerCamelCase =self.mid_block(UpperCAmelCase_ )
# post-process
lowerCamelCase =self.conv_norm_out(UpperCAmelCase_ )
lowerCamelCase =self.conv_act(UpperCAmelCase_ )
lowerCamelCase =self.conv_out(UpperCAmelCase_ )
return sample
class Decoder(nn.Module):
def __init__( self , UpperCAmelCase_=3 , UpperCAmelCase_=3 , UpperCAmelCase_=("UpDecoderBlock2D",) , UpperCAmelCase_=(64,) , UpperCAmelCase_=2 , UpperCAmelCase_=32 , UpperCAmelCase_="silu" , UpperCAmelCase_="group" , ):
super().__init__()
lowerCamelCase =layers_per_block
lowerCamelCase =nn.Convad(
UpperCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase =None
lowerCamelCase =nn.ModuleList([] )
lowerCamelCase =in_channels if norm_type == """spatial""" else None
# mid
lowerCamelCase =UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# up
lowerCamelCase =list(reversed(UpperCAmelCase_ ) )
lowerCamelCase =reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_ ):
lowerCamelCase =output_channel
lowerCamelCase =reversed_block_out_channels[i]
lowerCamelCase =i == len(UpperCAmelCase_ ) - 1
lowerCamelCase =get_up_block(
UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , resnet_time_scale_shift=UpperCAmelCase_ , )
self.up_blocks.append(UpperCAmelCase_ )
lowerCamelCase =output_channel
# out
if norm_type == "spatial":
lowerCamelCase =SpatialNorm(block_out_channels[0] , UpperCAmelCase_ )
else:
lowerCamelCase =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase_ , eps=1E-6 )
lowerCamelCase =nn.SiLU()
lowerCamelCase =nn.Convad(block_out_channels[0] , UpperCAmelCase_ , 3 , padding=1 )
lowerCamelCase =False
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None ):
lowerCamelCase =z
lowerCamelCase =self.conv_in(UpperCAmelCase_ )
lowerCamelCase =next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ ):
def custom_forward(*UpperCAmelCase_ ):
return module(*UpperCAmelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
lowerCamelCase =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
lowerCamelCase =sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
lowerCamelCase =torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
# middle
lowerCamelCase =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
lowerCamelCase =torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# middle
lowerCamelCase =self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
lowerCamelCase =up_block(UpperCAmelCase_ , UpperCAmelCase_ )
# post-process
if latent_embeds is None:
lowerCamelCase =self.conv_norm_out(UpperCAmelCase_ )
else:
lowerCamelCase =self.conv_norm_out(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =self.conv_act(UpperCAmelCase_ )
lowerCamelCase =self.conv_out(UpperCAmelCase_ )
return sample
class VectorQuantizer(nn.Module):
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_="random" , UpperCAmelCase_=False , UpperCAmelCase_=True ):
super().__init__()
lowerCamelCase =n_e
lowerCamelCase =vq_embed_dim
lowerCamelCase =beta
lowerCamelCase =legacy
lowerCamelCase =nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowerCamelCase =remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
lowerCamelCase =self.used.shape[0]
lowerCamelCase =unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCamelCase =self.re_embed
lowerCamelCase =self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowerCamelCase =n_e
lowerCamelCase =sane_index_shape
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =inds.shape
assert len(UpperCAmelCase_ ) > 1
lowerCamelCase =inds.reshape(ishape[0] , -1 )
lowerCamelCase =self.used.to(UpperCAmelCase_ )
lowerCamelCase =(inds[:, :, None] == used[None, None, ...]).long()
lowerCamelCase =match.argmax(-1 )
lowerCamelCase =match.sum(2 ) < 1
if self.unknown_index == "random":
lowerCamelCase =torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowerCamelCase =self.unknown_index
return new.reshape(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =inds.shape
assert len(UpperCAmelCase_ ) > 1
lowerCamelCase =inds.reshape(ishape[0] , -1 )
lowerCamelCase =self.used.to(UpperCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
lowerCamelCase =0 # simply set to zero
lowerCamelCase =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase_ )
return back.reshape(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
# reshape z -> (batch, height, width, channel) and flatten
lowerCamelCase =z.permute(0 , 2 , 3 , 1 ).contiguous()
lowerCamelCase =z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowerCamelCase =torch.argmin(torch.cdist(UpperCAmelCase_ , self.embedding.weight ) , dim=1 )
lowerCamelCase =self.embedding(UpperCAmelCase_ ).view(z.shape )
lowerCamelCase =None
lowerCamelCase =None
# compute loss for embedding
if not self.legacy:
lowerCamelCase =self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowerCamelCase =torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowerCamelCase =z + (z_q - z).detach()
# reshape back to match original input shape
lowerCamelCase =z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowerCamelCase =min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowerCamelCase =self.remap_to_used(UpperCAmelCase_ )
lowerCamelCase =min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowerCamelCase =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowerCamelCase =indices.reshape(shape[0] , -1 ) # add batch axis
lowerCamelCase =self.unmap_to_all(UpperCAmelCase_ )
lowerCamelCase =indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowerCamelCase =self.embedding(UpperCAmelCase_ )
if shape is not None:
lowerCamelCase =z_q.view(UpperCAmelCase_ )
# reshape back to match original input shape
lowerCamelCase =z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution(object):
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=False ):
lowerCamelCase =parameters
lowerCamelCase , lowerCamelCase =torch.chunk(UpperCAmelCase_ , 2 , dim=1 )
lowerCamelCase =torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
lowerCamelCase =deterministic
lowerCamelCase =torch.exp(0.5 * self.logvar )
lowerCamelCase =torch.exp(self.logvar )
if self.deterministic:
lowerCamelCase =lowerCamelCase =torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _snake_case ( self , UpperCAmelCase_ = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowerCamelCase =randn_tensor(
self.mean.shape , generator=UpperCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
lowerCamelCase =self.mean + self.std * sample
return x
def _snake_case ( self , UpperCAmelCase_=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowerCamelCase =np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCAmelCase_ )
def _snake_case ( self ):
return self.mean
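# Hedged numeric sketch (not part of the original module) of the
# reparameterization step DiagonalGaussianDistribution implements above: chunk
# the encoder output into mean/logvar along channels, clamp logvar, and draw
# mean + std * eps so gradients flow through mean and std:
#
#   moments = torch.randn(1, 8, 4, 4)              # hypothetical encoder output
#   mean, logvar = torch.chunk(moments, 2, dim=1)
#   logvar = torch.clamp(logvar, -30.0, 20.0)
#   std = torch.exp(0.5 * logvar)
#   sample = mean + std * torch.randn_like(mean)   # shape (1, 4, 4, 4)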
| 269
| 1
|
'''simple docstring'''
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been visited on a path s -> t.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
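    # Hedged sanity check (not part of the original snippet): on a 3-node chain
    # 0 -> 1 -> 2 with capacities 5 and 3, the bottleneck edge (1, 2) is the
    # only min-cut edge.
    small_graph = [
        [0, 5, 0],
        [0, 0, 3],
        [0, 0, 0],
    ]
    print(mincut(small_graph, source=0, sink=2))  # expected: [(1, 2)]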
| 585
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta_prelayernorm'] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta_prelayernorm'] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta_prelayernorm'] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 585
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
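# Hedged demo (not part of the original module; assumes an installed
# `transformers`): with the _LazyModule pattern above, importing the package
# is cheap, and heavy framework-specific submodules load only on first
# attribute access:
#
#   import importlib
#   mod = importlib.import_module("transformers.models.longformer")
#   print(type(mod).__name__)    # typically "_LazyModule"
#   _ = mod.LongformerModel      # first access triggers the torch import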
| 712
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
@property
    def has_state(self):
        return True
@register_to_config
def __init__( self : str , UpperCamelCase_ : int = 1000 , UpperCamelCase_ : float = 0.0001 , UpperCamelCase_ : float = 0.02 , UpperCamelCase_ : str = "linear" , UpperCamelCase_ : Optional[jnp.ndarray] = None , UpperCamelCase_ : str = "fixed_small" , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : jnp.dtype = jnp.floataa , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = dtype
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[CommonSchedulerState] = None):
"""simple docstring"""
if common is None:
__UpperCAmelCase : Tuple = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
__UpperCAmelCase : Tuple = jnp.array(1.0 , dtype=self.dtype)
__UpperCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase_ , init_noise_sigma=UpperCamelCase_ , timesteps=UpperCamelCase_ , )
def a_ ( self : Optional[Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : Optional[int] = None):
"""simple docstring"""
return sample
def a_ ( self : Any , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : int , UpperCamelCase_ : Tuple = ()):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : List[str] = (jnp.arange(0 , UpperCamelCase_) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ , )
def a_ ( self : Any , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=None):
"""simple docstring"""
__UpperCAmelCase : List[str] = state.common.alphas_cumprod[t]
__UpperCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__UpperCAmelCase : Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__UpperCAmelCase : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__UpperCAmelCase : str = jnp.clip(UpperCamelCase_ , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__UpperCAmelCase : Optional[int] = jnp.log(jnp.clip(UpperCamelCase_ , a_min=1e-20))
elif variance_type == "fixed_large":
__UpperCAmelCase : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__UpperCAmelCase : str = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__UpperCAmelCase : Any = variance
__UpperCAmelCase : Union[str, Any] = state.common.betas[t]
__UpperCAmelCase : List[str] = (predicted_variance + 1) / 2
__UpperCAmelCase : int = frac * max_log + (1 - frac) * min_log
return variance
def a_ ( self : Optional[int] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : Optional[jax.random.KeyArray] = None , UpperCamelCase_ : bool = True , ):
"""simple docstring"""
__UpperCAmelCase : Dict = timestep
if key is None:
__UpperCAmelCase : List[str] = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__UpperCAmelCase , __UpperCAmelCase : int = jnp.split(UpperCamelCase_ , sample.shape[1] , axis=1)
else:
__UpperCAmelCase : List[str] = None
# 1. compute alphas, betas
__UpperCAmelCase : str = state.common.alphas_cumprod[t]
__UpperCAmelCase : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
__UpperCAmelCase : Tuple = 1 - alpha_prod_t
__UpperCAmelCase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__UpperCAmelCase : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__UpperCAmelCase : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler.")
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__UpperCAmelCase : List[Any] = jnp.clip(UpperCamelCase_ , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__UpperCAmelCase : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__UpperCAmelCase : int = jax.random.split(UpperCamelCase_ , num=1)
__UpperCAmelCase : Any = jax.random.normal(UpperCamelCase_ , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(UpperCamelCase_ , UpperCamelCase_ , predicted_variance=UpperCamelCase_) ** 0.5) * noise
__UpperCAmelCase : Tuple = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
__UpperCAmelCase : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase_ , state=UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , ):
"""simple docstring"""
return add_noise_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , ):
"""simple docstring"""
return get_velocity_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def __len__( self : int):
"""simple docstring"""
return self.config.num_train_timesteps
| 487
| 0
|
def circle_sort(collection: list) -> list:
    """Returns the sorted collection, sorting it in place with circle sort."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )

            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
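    # Hedged quick check (not part of the original snippet):
    assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]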
| 257
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""
def __init__( self , UpperCamelCase_ = None , UpperCamelCase_ = [] ):
lowercase_ :str = 0
lowercase_ :str = choices
lowercase_ :List[str] = prompt
if sys.platform == "win32":
lowercase_ :List[Any] = '''*'''
else:
lowercase_ :str = '''➔ '''
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase_ )
else:
forceWrite(self.choices[index] , UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ ):
if index == self.position:
forceWrite(f" {self.arrow_char} " )
self.write_choice(UpperCamelCase_ )
else:
forceWrite(f" {self.choices[index]}" )
reset_cursor()
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = 1 ):
lowercase_ :Optional[Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase_ )
move_cursor(UpperCamelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def UpperCamelCase ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def UpperCamelCase ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def UpperCamelCase ( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def UpperCamelCase ( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(UpperCamelCase_ )] for number in range(10 )] )
def UpperCamelCase ( self ):
lowercase_ :int = int(chr(self.current_selection ) )
lowercase_ :Optional[Any] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCamelCase_ )
else:
return
else:
return
def UpperCamelCase ( self , UpperCamelCase_ = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
lowercase_ :str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase_ )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
lowercase_ :Optional[Any] = int(builtins.input() )
except ValueError:
lowercase_ :List[Any] = default_choice
else:
lowercase_ :List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(UpperCamelCase_ , '''\n''' )
return choice
| 257
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 714
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
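# Added note (not in the original module): the `_LazyModule` registered above keeps
# package import cheap; the torch-backed submodule is only imported on first
# attribute access, e.g.:
#
#   from transformers.models.m2m_100 import M2M100Config   # resolves lazily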
| 190
| 0
|
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary-search the insertion point for ``val`` within the sorted prefix collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the tail one slot to the right, then drop ``val`` into place
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
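# Added doctest-style examples for the function above:
#   binary_insertion_sort([5, 2, 4, 6, 1, 3])  ->  [1, 2, 3, 4, 5, 6]
#   binary_insertion_sort([])                  ->  []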
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 144
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the edge as used in both directions (undirected graph)
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
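# Added note on the return-code convention used above:
#   1 -> every vertex has even degree: an Euler circuit exists
#   2 -> exactly two odd-degree vertices: an Euler path exists (odd_node is a valid start)
#   3 -> anything else: no Euler path or circuit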
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 144
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    # trial division: divide out each prime factor while it divides n
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
return factors
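# Added examples for the function above:
#   prime_factors(360)  ->  [2, 2, 2, 3, 3, 5]
#   prime_factors(97)   ->  [97]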
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m]) == 2:
m -= 1
return peak(lst[m:])
# decreasing
else:
if len(lst[:m]) == 2:
m += 1
return peak(lst[:m])
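# Added example (this divide-and-conquer peak finder assumes a list that
# strictly rises and then strictly falls):
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  ->  5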
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
| 1
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 377
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 377
| 1
|
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    def __init__(self, data):
        self.data = data
        # initial hash state: five 32-bit words, per the SHA-1 spec
        self.h = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
@staticmethod
    def rotate(n, b):
        # left-rotate the 32-bit word n by b bits
        return ((n << b) | (n >> (32 - b))) & 0XFF_FF_FF_FF
    def padding(self):
        padding = B"\x80" + B"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
return padded_data
    def split_blocks(self):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
return w
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0XCA_62_C1_D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FF_FF_FF,
                self.h[1] + b & 0XFF_FF_FF_FF,
                self.h[2] + c & 0XFF_FF_FF_FF,
                self.h[3] + d & 0XFF_FF_FF_FF,
                self.h[4] + e & 0XFF_FF_FF_FF,
            )
return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    msg = B"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 489
|
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
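# Added context (not in the original): the helpers below compute the sequence
# a(n + 1) = a(n) + digitsum(a(n)) with a(1) = 1 (Project Euler 551).
# ``next_term`` jumps ahead using memoized differences, ``compute`` advances one
# term at a time, and ``add`` folds a carry into the little-endian digit list.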
def next_term(a_i, k, i, n):
    # a_i -> b * 10^k + c; ds_b is digitsum(b), c the low-order digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(kk, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, kk, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # fold ``addend`` into the little-endian digit list, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 489
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises unless ``sentencepiece`` is installed.

    NOTE: the original file repeated this identical dummy definition once per
    sentencepiece-backed tokenizer class; the concrete class names were lost in
    extraction, so a single representative definition is kept here.
    """

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 139
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
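# Added note: ``slow_tokenizer_class`` is how a fast (Rust-backed) tokenizer points at
# its slow Python counterpart, so conversion between the two works. Hypothetical usage
# (the checkpoint name below is illustrative only):
#
#   tok = CustomTokenizerFast.from_pretrained("some/checkpoint")
#   tok.save_pretrained("out")  # can regenerate the slow tokenizer's vocab files too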
| 139
| 1
|
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 720
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 399
| 0
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfully saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message="Add model", use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message="Add image processor", use_temp_dir=True,
        )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 95
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def time_input_dim(self):
return 32
@property
    def block_out_channels_0(self):
return self.time_input_dim
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        init_image = init_image.resize((512, 512))
        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''')
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = '''A robot, 4k photo'''
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 152
| 0
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering with TensorFlow 1.x graph-mode ops."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vector_indices))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        # NOTE: tf.sub is the TF<=0.11 name; on newer TF 1.x this is tf.subtract
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
return centroids, assignments
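# Added usage sketch (assumes the TF1-era APIs used above are available; on TF2
# this would need tf.compat.v1 plus tf.subtract in place of tf.sub):
#
#   points = [[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]]
#   centroids, assignments = TFKMeansCluster(points, 2)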
| 41
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
return ans
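# Added examples (note this computes the best *subsequence*, not subarray,
# so every positive element can be taken):
#   max_subsequence_sum([1, 2, 3, 4, -2])      ->  10
#   max_subsequence_sum([-2, -3, -1, -4, -6])  ->  -1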
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
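    # e.g. with 500 batches per epoch, 3 epochs and gradient_accumulation_steps=2,
    # the scheduler is configured for (500 * 3) // 2 = 750 optimizer steps.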
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 65
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
UpperCAmelCase__ : int = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16_000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
# load decoder from hub
UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder"""
def __lowercase ( self : str ,**A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : List[str] ,**A : Dict ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : Any ,**A : List[Any] ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A )
def __lowercase ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(A ,"""include""" ):
WavaVecaProcessorWithLM(
tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : str = floats_list((3, 1_000) )
UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" )
UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = self.get_decoder()
UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : List[Any] = """This is a test string"""
UpperCAmelCase__ : int = processor(text=A )
UpperCAmelCase__ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ):
'''simple docstring'''
np.random.seed(A )
return np.random.rand(*A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
UpperCAmelCase__ : Tuple = processor.decode(A )
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def __lowercase ( self : List[str] ,A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : List[str] = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A )
UpperCAmelCase__ : Optional[Any] = list(A )
with get_context("""fork""" ).Pool() as p:
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A ,decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text )
self.assertListEqual(A ,decoded_processor.logit_score )
self.assertListEqual(A ,decoded_processor.lm_score )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Any = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : List[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
UpperCAmelCase__ : List[str] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(A )
with get_context("""fork""" ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A )
self.assertTrue(np.array_equal(A ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) )
self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : Tuple = 2.0
UpperCAmelCase__ : str = 5.0
UpperCAmelCase__ : Union[str, Any] = -2_0.0
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : str = processor.batch_decode(
A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
UpperCAmelCase__ : Any = decoded_processor_out.text
UpperCAmelCase__ : Union[str, Any] = list(A )
decoder.reset_params(
alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
with get_context("""fork""" ).Pool() as pool:
UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch(
A ,A ,)
UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A )
UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,A )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCAmelCase__ : Optional[int] = os.listdir(A )
UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A )
UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCAmelCase__ : Tuple = os.listdir(A )
UpperCAmelCase__ : Dict = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
 # test that both decoder from hub and local files in cache are the same
self.assertListEqual(A ,A )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Dict = floats_list((3, 1_000) )
UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" )
UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A )
UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
@staticmethod
def __lowercase ( A : Optional[Any] ,A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Dict = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : int = self._get_dummy_logits()
UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __lowercase ( self : Tuple ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A )
UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : Tuple = iter(A )
UpperCAmelCase__ : Optional[int] = next(A )
UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy()
UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A )
UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Union[str, Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A )
self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text )
# output times
UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) )
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
| 65
| 1
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset the chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 717
|
"""Binary shifts: logical left/right shift and arithmetic right shift on integers."""


def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive integer left.

    >>> logical_left_shift(0b1101, 2)
    '0b110100'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive integer right, filling with zeros.

    >>> logical_right_shift(0b1101, 2)
    '0b11'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while preserving the sign bit (two's complement).

    >>> arithmetic_right_shift(0b1101, 2)
    '0b00011'
    >>> arithmetic_right_shift(-0b1101, 2)
    '0b11100'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
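# Note on arithmetic_right_shift: a negative number is first written in two's
# complement (e.g. -13 -> '10011' on 5 bits), then the sign bit is replicated on
# the left while the low bits fall off, so arithmetic_right_shift(-13, 2)
# returns '0b11100' (= -4).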
| 179
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 487
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 487
| 1
|
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
 from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowerCAmelCase = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
    `param.to(device)` creates a new tensor not linked to the parameter, which is why we have to use this function).
    """
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"""{module} has no attribute {split}.""")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device('meta') and device not in ["meta", torch.device('meta')] and value is None:
        raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('cpu')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse(
                        '0.37.2')
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.')
            else:
                new_value = torch.tensor(value, device='cpu')

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, 'SCB', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
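# Usage sketch (added; the tensor name is hypothetical and assumes a
# bnb-quantized `model` with a weight already materialized on CPU):
#   set_module_quantized_tensor_to_device(
#       model, "transformer.h.0.mlp.c_fc.weight", 0, value=cpu_weight
#   )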
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Recursively replaces `nn.Linear`/`Conv1D` modules with `bitsandbytes` quantized linear layers.
    By default the `lm_head` is kept in full precision for numerical stability.
    """
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.')

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead',
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead',
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    A utility function to get the keys of the modules to keep in full precision, e.g. the tied `lm_head`
    of CausalLM models.
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)

    return filtered_module_names
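# Typical call sequence when quantizing a model (a sketch; `model` and
# `quantization_config` are assumed to already exist):
#   modules_to_not_convert = get_keys_to_not_convert(model)
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config
#   )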
| 319
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = GPTSwaTokenizer
__UpperCAmelCase : Any = False
__UpperCAmelCase : Any = True
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_a : Any = GPTSwaTokenizer(_a ,eos_token='<unk>' ,bos_token='<unk>' ,pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[Any] ,_a : Any ):
'''simple docstring'''
_a : Optional[int] = 'This is a test'
_a : str = 'This is a test'
return input_text, output_text
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = '<s>'
_a : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<unk>' )
self.assertEqual(vocab_keys[1] ,'<s>' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(_a ) ,2000 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,2000 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[Any] = GPTSwaTokenizer(_a )
_a : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(_a ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[465, 287, 265, 631, 842] )
_a : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_a ,['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] ,)
# fmt: on
_a : str = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a ,[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,)
_a : str = tokenizer.convert_ids_to_tokens(_a )
# fmt: off
self.assertListEqual(
_a ,['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = GPTSwaTokenizer(_a )
_a : List[Any] = ['This is a test', 'I was born in 92000, and this is falsé.']
_a : Optional[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_a ,_a ):
self.assertListEqual(tokenizer.encode_fast(_a ) ,_a )
# Test that decode_fast returns the input text
for text, token_ids in zip(_a ,_a ):
self.assertEqual(tokenizer.decode_fast(_a ) ,_a )
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
_a : Union[str, Any] = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name='AI-Sweden/gpt-sw3-126m' ,sequences=_a ,)
| 319
| 1
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and whitespace).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, increment the count in the
    # corresponding counter (and decrement it for the second string)
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
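# More examples: check_anagrams('New York', 'york new') is True (case and
# whitespace are ignored), while check_anagrams('hello', 'world') is False.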
| 83
|
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, F'{backend} is not in the deps table!'
| 688
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
SCREAMING_SNAKE_CASE_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
SCREAMING_SNAKE_CASE_ = 'UperNetConfig'
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 0 , snake_case_ = False , snake_case_ = 1 , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: str = nn.Convad(
in_channels=snake_case_ , out_channels=snake_case_ , kernel_size=snake_case_ , padding=snake_case_ , bias=snake_case_ , dilation=snake_case_ , )
__UpperCAmelCase: int = nn.BatchNormad(snake_case_ )
__UpperCAmelCase: Optional[int] = nn.ReLU()
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: str = self.conv(snake_case_ )
__UpperCAmelCase: Any = self.batch_norm(snake_case_ )
__UpperCAmelCase: Tuple = self.activation(snake_case_ )
return output
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: List[str] = [
nn.AdaptiveAvgPoolad(snake_case_ ),
UperNetConvModule(snake_case_ , snake_case_ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(snake_case_ ) , snake_case_ )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = input
for layer in self.layers:
__UpperCAmelCase: Optional[int] = layer(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Dict = pool_scales
__UpperCAmelCase: Dict = align_corners
__UpperCAmelCase: Tuple = in_channels
__UpperCAmelCase: Union[str, Any] = channels
__UpperCAmelCase: str = []
for i, pool_scale in enumerate(snake_case_ ):
__UpperCAmelCase: Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=snake_case_ , in_channels=snake_case_ , channels=snake_case_ )
self.blocks.append(snake_case_ )
self.add_module(str(snake_case_ ) , snake_case_ )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = []
for ppm in self.blocks:
__UpperCAmelCase: Optional[Any] = ppm(snake_case_ )
__UpperCAmelCase: Any = nn.functional.interpolate(
snake_case_ , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(snake_case_ )
return ppm_outs
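# In the pyramid pooling forward pass above: with pool_scales=(1, 2, 3, 6) and an
# input of shape (batch, C, H, W), each block pools to (batch, channels, s, s) and
# is interpolated back to (batch, channels, H, W), so one full-resolution feature
# map is returned per pooling scale.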
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Tuple = config
__UpperCAmelCase: Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
__UpperCAmelCase: int = in_channels
__UpperCAmelCase: Any = config.hidden_size
__UpperCAmelCase: Dict = False
__UpperCAmelCase: List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__UpperCAmelCase: str = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__UpperCAmelCase: Optional[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__UpperCAmelCase: Tuple = nn.ModuleList()
__UpperCAmelCase: Tuple = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__UpperCAmelCase: Any = UperNetConvModule(snake_case_ , self.channels , kernel_size=1 )
__UpperCAmelCase: List[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(snake_case_ )
self.fpn_convs.append(snake_case_ )
__UpperCAmelCase: int = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowercase_ ( self ):
'''simple docstring'''
self.apply(self._init_weights )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = inputs[-1]
__UpperCAmelCase: Union[str, Any] = [x]
psp_outs.extend(self.psp_modules(snake_case_ ) )
__UpperCAmelCase: List[Any] = torch.cat(snake_case_ , dim=1 )
__UpperCAmelCase: Union[str, Any] = self.bottleneck(snake_case_ )
return output
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(snake_case_ ) )
# build top-down path
__UpperCAmelCase: List[str] = len(snake_case_ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__UpperCAmelCase: Optional[Any] = laterals[i - 1].shape[2:]
__UpperCAmelCase: Tuple = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=snake_case_ , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
__UpperCAmelCase: Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__UpperCAmelCase: str = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
__UpperCAmelCase: List[str] = torch.cat(snake_case_ , dim=1 )
__UpperCAmelCase: Optional[Any] = self.fpn_bottleneck(snake_case_ )
__UpperCAmelCase: Optional[Any] = self.classifier(snake_case_ )
return output
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ = 2 , snake_case_ = 3 , snake_case_ = 1 ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: int = config
__UpperCAmelCase: Union[str, Any] = config.auxiliary_in_channels
__UpperCAmelCase: Dict = config.auxiliary_channels
__UpperCAmelCase: Tuple = config.auxiliary_num_convs
__UpperCAmelCase: Union[str, Any] = config.auxiliary_concat_input
__UpperCAmelCase: List[str] = in_index
__UpperCAmelCase: Union[str, Any] = (kernel_size // 2) * dilation
__UpperCAmelCase: str = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=snake_case_ , padding=snake_case_ , dilation=snake_case_ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=snake_case_ , padding=snake_case_ , dilation=snake_case_ ) )
if self.num_convs == 0:
__UpperCAmelCase: str = nn.Identity()
else:
__UpperCAmelCase: Union[str, Any] = nn.Sequential(*snake_case_ )
if self.concat_input:
__UpperCAmelCase: List[Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=snake_case_ , padding=kernel_size // 2 )
__UpperCAmelCase: List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowercase_ ( self ):
'''simple docstring'''
self.apply(self._init_weights )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = encoder_hidden_states[self.in_index]
__UpperCAmelCase: List[str] = self.convs(snake_case_ )
if self.concat_input:
__UpperCAmelCase: int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__UpperCAmelCase: Optional[Any] = self.classifier(snake_case_ )
return output

class UperNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        # The obfuscated source hid the class checked here; `BackboneMixin` matches the
        # upstream implementation of this method.
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
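
# Hedged usage sketch (not part of the original file): assuming a Hub checkpoint such as
# "openmmlab/upernet-convnext-tiny" (name not verified here), inference would look like:
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width), upsampled to the input size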
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = R'\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'

class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor. It applies each processor in turn.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores

class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores

class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-p (nucleus) filtering."""

    def __init__(self, top_p, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
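
# Illustrative note (not part of the original file): for one row of probabilities
# [0.5, 0.3, 0.1, 0.1] and top_p=0.8, the sorted cumulative sums are [0.5, 0.8, 0.9, 1.0];
# `cumulative_probs < top_p` gives [T, F, F, F], the roll-by-one plus "keep position 0"
# trick turns that into [T, T, F, F], so the token *crossing* top_p is kept and the tokens
# with probabilities 0.5 and 0.3 survive while the rest are set to filter_value.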

class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-k filtering, keeping only the k highest-probability tokens."""

    def __init__(self, top_k, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
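
# Illustrative note (not part of the original file): the flatten-and-shift trick above turns a
# per-row scatter into a single 1-D scatter. For batch_size=2, vocab_size=5 and top_k=2,
# row 1's top-k indices are shifted by 1 * 5 so they land in the second block of the flat
# (2 * 5,) buffer before it is reshaped back to (2, 5).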

class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores

class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores

class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
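
# Illustrative note (not part of the original file): jnp.clip(cur_len - min_length, 0, 1) is 0
# while cur_len < min_length and 1 afterwards, so apply_penalty equals 1 exactly until the
# minimum length is reached, pinning the EOS column to -inf only during that window.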

class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts, at `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores

class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every generation step."""

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores

class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores

class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper transcription."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # has to be a non-timestamp
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
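
# Hedged usage sketch (not part of the original file): composing several of the processors
# defined above; Flax generation loops call the list once per decoding step.
#
#   processors = FlaxLogitsProcessorList(
#       [
#           FlaxTemperatureLogitsWarper(temperature=0.7),
#           FlaxTopKLogitsWarper(top_k=50),
#           FlaxTopPLogitsWarper(top_p=0.9),
#       ]
#   )
#   scores = processors(input_ids, scores, cur_len)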
from ...processing_utils import ProcessorMixin

class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """
        Forwards the `images` argument to the image processor and the `audio` argument to the feature extractor,
        merging their outputs into a single dictionary.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
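
# Hedged usage sketch (not part of the original file; `video_frames` and `waveform` are
# placeholder inputs, and the 44.1 kHz default sampling rate is an assumption about the
# TVLT feature extractor):
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100, return_tensors="pt")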
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))

class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range, down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Levit does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image

@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel

@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)

def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
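
# Illustrative note (not part of the original file): the decoder mask built above always keeps
# the first decoder position (tf.ones for column 0, covering the forced decoder start token)
# and masks pad tokens everywhere else, which is why the concat is split at [:, :1] / [:, 1:].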

@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos

def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores

def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 157
| 0
|
from itertools import permutations
def is_substring_divisible (num ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True
def solution (n: int = 1_0 ) -> int:
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
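# Worked example from the Project Euler 43 statement: 1406357289 is pandigital
# and has the sub-string divisibility property, so:
#     assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))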
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
|
def solution ( lowercase__ : int = 60_08_51_47_51_43 ):
    '''simple docstring'''
    try:
        n = int(lowercase__ )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
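# Worked example from the Project Euler 3 statement: the prime factors of 13195
# are 5, 7, 13 and 29, so:
#     assert solution(13_195) == 29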
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85
| 0
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a__ ( AbstractFileSystem ):
__magic_name__ : Any = ""
__magic_name__ : Optional[int] = "hf-legacy" # "hf://"" is reserved for hffs
    def __init__(self, repo_info : Optional[DatasetInfo] = None, token : Optional[str] = None, **kwargs, ):
        """simple docstring"""
        super().__init__(self, **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self ):
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d ): {'name': str(d ), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open(self, path : str, mode : str = "rb", **kwargs, ):
        """simple docstring"""
        if not isinstance(self.repo_info, DatasetInfo ):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha )
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token ), client_kwargs={'trust_env': True}, ).open()
    def info(self, path : str, **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls(self, path : str, detail : bool = False, **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip('/' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out )
| 713
|
'''simple docstring'''
from manim import *
class a__ ( Scene ):
    def construct (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5, width=0.5 )
SCREAMING_SNAKE_CASE : str = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE : List[str] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = VGroup(__UpperCAmelCase, __UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE : Tuple = Text('''CPU''', font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(__UpperCAmelCase, __UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE : List[str] = Text('''GPU''', font_size=24 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Group(__UpperCAmelCase, __UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase )
gpu.align_to(__UpperCAmelCase, __UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text('''Model''', font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(__UpperCAmelCase, __UpperCAmelCase ).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__UpperCAmelCase, run_time=1 ), Create(__UpperCAmelCase, run_time=1 ), Create(__UpperCAmelCase, run_time=1 ), )
SCREAMING_SNAKE_CASE : Tuple = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24, )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase, run_time=2.5 ), Write(__UpperCAmelCase ), Write(__UpperCAmelCase ) )
self.add(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
for i, rect in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase, opacity=0.7 )
cpu_target.move_to(__UpperCAmelCase )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE : List[str] = 0.46 / 4
SCREAMING_SNAKE_CASE : Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=__UpperCAmelCase, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=__UpperCAmelCase, buff=0.0 )
cpu_targs.append(__UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__UpperCAmelCase ) )
second_animations.append(MoveToTarget(__UpperCAmelCase, run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(*__UpperCAmelCase )
self.wait()
| 355
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( SequenceFeatureExtractor ):
    model_input_names = ['''input_features''', '''is_longer''']
    def __init__( self , feature_size=64 , sampling_rate=4_8000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min : float = 0 , frequency_max : float = 1_4000 , top_db : int = None , truncation : str = "fusion" , padding : str = "repeatpad" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="""htk""" , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
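    # Note: `(fft_window_size >> 1) + 1` above is the standard rFFT bin count,
    # i.e. fft_window_size // 2 + 1 (a 1024-point window yields 513 bins).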
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self , waveform : np.array , mel_filters : Optional[np.array] = None ):
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="""dB""" , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
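    # Illustrative numbers (not from the source): with total_frames=1_000 and
    # chunk_frames=400 there are 601 valid start indices, split into roughly
    # [0..200], [201..400] and [401..600]; one random start per range gives
    # crops covering the front, middle and back of the mel spectrogram.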
    def _get_input_mel( self , waveform : np.array , max_length , truncation , padding ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'data_truncating {truncation} not implemented' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation : str = None , padding : Optional[str] = None , max_length : Optional[int] = None , sampling_rate : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.floataa )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            raw_speech = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , List ):
            input_mel = [np.asarray(feature , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"""input_features""": input_mel, """is_longer""": is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
| 468
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class __lowerCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'<madeupword{i}>' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        sp_size = len(self.sp_model )
        madeup_words_dict = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_dict )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
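    # Illustrative mapping (follows the alignment table above): sentencepiece id 3,
    # the first "real" piece ",", becomes fairseq id 3 + fairseq_offset == 4, while
    # ids 0-3 stay reserved for "<s>", "<pad>", "</s>", "<unk>".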
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.sep_token_id] + token_ids_a
        sep = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_b
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a ))
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b ))
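    # e.g. for token_ids_a=[x, y] and token_ids_b=[z] the mask is
    # [1, 0, 0, 1, 1, 0]: the leading separator, two sequence-a tokens,
    # the double separator, and the sequence-b token.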
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_b ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
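    # e.g. ["▁Hello", "▁world"] joins to "▁Hello▁world", and replacing the
    # sentencepiece underline with spaces plus stripping yields "Hello world".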
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 468
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class snake_case__ ( PretrainedConfig ):
    model_type = 'xmod'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class snake_case__ ( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 303
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : Optional[Any] ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class snake_case__ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls : str ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCAmelCase__ ( cls : int ):
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
    def UpperCAmelCase__ ( self : int ):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def UpperCAmelCase__ ( self : List[str] ):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def UpperCAmelCase__ ( self : List[Any] ):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 303
| 1
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
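    # e.g. full_name "feature_extractor.conv_layers.0.0.weight" parses to
    # name "0.0.weight", layer_id=0, type_id=0 (the conv itself); type_id 2
    # denotes the layer-norm sub-module handled further below.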
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["""cfg"""] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["""model"""] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 38
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester( ConfigTester ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """embed_dim""" ) )
        self.parent.assertTrue(hasattr(config , """num_heads""" ) )
class TFCvtModelTester:
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
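            # Standard conv output-size arithmetic: floor((size + 2*pad - kernel)/stride) + 1,
            # e.g. image_size=64, patch_sizes[0]=7, patch_padding[0]=2, patch_stride[0]=4
            # gives floor((64 + 4 - 7) / 4) + 1 = 16.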
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
def __UpperCamelCase ( self ):
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 38
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ):
    '''simple docstring'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    # the two rows of stats.npy are taken to be the feature-wise mean and scale
    # used to de-normalize the generator's input spectrogram
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 648
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "masked_bert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 648
| 1
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __A ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(self , vocab_size=250_112 , d_model=512 , d_kv=64 , d_ff=1_024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
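        # e.g. feed_forward_proj="gated-gelu" splits into ["gated", "gelu"]: a gated
        # activation whose dense activation "gelu" is then remapped to "gelu_new".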
    @property
    def hidden_size(self ):
        """simple docstring"""
        return self.d_model
    @property
    def num_attention_heads(self ):
        """simple docstring"""
        return self.num_heads
    @property
    def num_hidden_layers(self ):
        """simple docstring"""
        return self.num_layers
class __A ( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ (self ) -> int:
"""simple docstring"""
return 13
@property
def a__ (self ) -> float:
"""simple docstring"""
return 5E-4
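# NOTE: brief usage sketch, not part of the original module:
#
#     config = UMT5Config()                    # defaults defined above
#     assert config.hidden_size == config.d_model == 512
#     assert config.num_hidden_layers == config.num_layers == 8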
| 11
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
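# NOTE: explanatory sketch, not part of the original module. With the
# _LazyModule registration above, importing the package stays cheap; heavy
# submodules are imported only on first attribute access, e.g.:
#
#     from transformers.models.roformer import RoFormerConfig  # triggers the real import
#     config = RoFormerConfig()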
| 188
| 0
|
"""PyTorch Lightning base module and training utilities shared by the transformers examples."""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # print the names of parameters that did not receive a gradient
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
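# NOTE: illustrative usage sketch, not part of the original module.
# `MyTaskModule` is a hypothetical BaseTransformer subclass that implements
# get_dataloader():
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTaskModule.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     trainer = generic_train(MyTaskModule(args), args)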
| 513
|
"""CANINE model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
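# NOTE: minimal usage sketch, not part of the original module. CANINE operates
# on characters, hence the large default max_position_embeddings:
#
#     config = CanineConfig()
#     assert config.max_position_embeddings == 16384
#     assert config.downsampling_rate == 4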
| 513
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)

        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case = {"input_ids": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
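# NOTE: illustrative invocation, not part of the original file; the test path
# is an assumption based on the usual transformers repository layout:
#
#     python -m pytest tests/models/marian/test_tokenization_marian.py -k vocab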
| 524
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" Funnel Transformer tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
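# NOTE: minimal usage sketch, not part of the original module. Funnel assigns
# token_type_id 2 to the leading <cls> token (see cls_token_type_id above):
#
#     tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#     enc = tok("hello", "world")
#     # enc["token_type_ids"] begins with 2, then 0s for the first sequence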
| 321
| 0
|
"""A simple launcher script for TPU training."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
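# NOTE: illustrative invocation, not part of the original file; the wrapped
# script name and flag are placeholders. The wrapped script must define an
# `_mp_fn(index)` entry point for xmp.spawn:
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased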
| 350
|
"""A simple launcher script for TPU training."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 350
| 1
|
"""Tests for generation with Flax models."""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__UpperCAmelCase ="0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, with values within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
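# NOTE: illustrative sketch, not part of the original file, showing the helpers
# above; shapes and vocab size are arbitrary:
#
#     input_ids = ids_tensor((2, 8), vocab_size=99)   # (batch, seq) int32 token ids
#     attention_mask = random_attention_mask((2, 8))  # 0/1 mask, last column forced to 1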
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 546
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of `number`.
    If `digit_amount` > 0, the decimal part is rounded to that many places.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 546
| 1
|
"""TAPAS configuration."""

from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
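# NOTE: minimal usage sketch, not part of the original module; the WTQ-style
# argument values are illustrative only:
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#     assert config.model_type == "tapas"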
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 559
| 0
|
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))

        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class UpperCAmelCase__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = df
SCREAMING_SNAKE_CASE__ = partition_order or range(self.df.rdd.getNumPartitions() )
SCREAMING_SNAKE_CASE__ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Optional[int] ) -> str:
yield from self.generate_examples_fn()
def lowercase_ ( self : int , __lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.split_shard_indices_by_worker(__lowerCamelCase , __lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCamelCase )
@property
def lowercase_ ( self : Dict ) -> Any:
return len(self.partition_order )
class UpperCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
a = SparkConfig
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] = None , __lowerCamelCase : Union[str, Any] = None , **__lowerCamelCase : Union[str, Any] , ) -> Dict:
import pyspark
SCREAMING_SNAKE_CASE__ = pyspark.sql.SparkSession.builder.getOrCreate()
SCREAMING_SNAKE_CASE__ = df
SCREAMING_SNAKE_CASE__ = working_dir
super().__init__(
cache_dir=__lowerCamelCase , config_name=str(self.df.semanticHash() ) , **__lowerCamelCase , )
def lowercase_ ( self : str ) -> int:
def create_cache_and_write_probe(__lowerCamelCase : Union[str, Any] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=True )
SCREAMING_SNAKE_CASE__ = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE__ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCamelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def lowercase_ ( self : List[str] ) -> List[Any]:
return datasets.DatasetInfo(features=self.config.features )
def lowercase_ ( self : Any , __lowerCamelCase : Dict ) -> List[str]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowercase_ ( self : Dict , __lowerCamelCase : Any ) -> Union[str, Any]:
import pyspark
def get_arrow_batch_size(__lowerCamelCase : List[str] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
SCREAMING_SNAKE_CASE__ = self.df.count()
SCREAMING_SNAKE_CASE__ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE__ = (
self.df.limit(__lowerCamelCase )
.repartition(1 )
.mapInArrow(__lowerCamelCase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE__ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , int(approx_total_size / max_shard_size ) )
SCREAMING_SNAKE_CASE__ = self.df.repartition(__lowerCamelCase )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any , ) -> Any:
import pyspark
SCREAMING_SNAKE_CASE__ = ParquetWriter if file_format == '''parquet''' else ArrowWriter
SCREAMING_SNAKE_CASE__ = os.path.join(self._working_dir , os.path.basename(__lowerCamelCase ) ) if self._working_dir else fpath
SCREAMING_SNAKE_CASE__ = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
SCREAMING_SNAKE_CASE__ = self.config.features
SCREAMING_SNAKE_CASE__ = self._writer_batch_size
SCREAMING_SNAKE_CASE__ = self._fs.storage_options
def write_arrow(__lowerCamelCase : Tuple ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
SCREAMING_SNAKE_CASE__ = pyspark.TaskContext().taskAttemptId()
SCREAMING_SNAKE_CASE__ = next(__lowerCamelCase , __lowerCamelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = writer_class(
features=__lowerCamelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCamelCase , storage_options=__lowerCamelCase , embed_local_files=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCamelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
SCREAMING_SNAKE_CASE__ = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCamelCase , storage_options=__lowerCamelCase , embed_local_files=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = pa.Table.from_batches([batch] )
writer.write_table(__lowerCamelCase )
if writer._num_bytes > 0:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
    for file in os.listdir(os.path.dirname(working_fpath ) ):
        dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
        shutil.move(file , dest )
SCREAMING_SNAKE_CASE__ = (
self.df.mapInArrow(__lowerCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowercase_ ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple = "arrow" , __lowerCamelCase : Any = None , __lowerCamelCase : List[Any] = None , **__lowerCamelCase : Union[str, Any] , ) -> List[str]:
self._validate_cache_dir()
SCREAMING_SNAKE_CASE__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = not is_remote_filesystem(self._fs )
SCREAMING_SNAKE_CASE__ = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE__ = '''-TTTTT-SSSSS-of-NNNNN'''
SCREAMING_SNAKE_CASE__ = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
SCREAMING_SNAKE_CASE__ = path_join(self._output_dir , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for task_id, content in self._prepare_split_single(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
(num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
SCREAMING_SNAKE_CASE__ = total_num_examples
SCREAMING_SNAKE_CASE__ = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
SCREAMING_SNAKE_CASE__ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE__ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , ):
rename(
__lowerCamelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(len(__lowerCamelCase ) ):
task_id , num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCamelCase , len(__lowerCamelCase ) ).map(lambda __lowerCamelCase : _rename_shard(*__lowerCamelCase ) ).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(__lowerCamelCase , '''''' ) , )
def lowercase_ ( self : List[str] , __lowerCamelCase : Union[str, Any] , ) -> Union[str, Any]:
return SparkExamplesIterable(self.df )
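# A usage sketch for the builder above, assuming the public wrapper
# `Dataset.from_spark` (which instantiates this Spark builder internally).
# The DataFrame contents are illustrative.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], schema="id long, text string")
ds = Dataset.from_spark(df)  # partitions are written as Arrow shards, then loaded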
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
super().__init__(
lowerCamelCase , question_encoder_tokenizer=lowerCamelCase , generator_tokenizer=lowerCamelCase , index=lowerCamelCase , init_retrieval=lowerCamelCase , )
_lowerCAmelCase = None
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
_lowerCAmelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
_lowerCAmelCase = str(distributed_port + 1 )
_lowerCAmelCase = dist.new_group(ranks=lowerCamelCase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def A__ (self ):
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=torch.float32 ):
'''simple docstring'''
_lowerCAmelCase = torch.empty(lowerCamelCase , dtype=lowerCamelCase )
dist.scatter(lowerCamelCase , src=0 , scatter_list=lowerCamelCase , group=self.process_group )
return target_tensor
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_lowerCAmelCase = next((addr for addr in addrs if addr.startswith("""e""" )) , lowerCamelCase )
return ifname
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not dist.is_initialized():
_lowerCAmelCase , _lowerCAmelCase = self._main_retrieve(lowerCamelCase , lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCamelCase )
# distributed training
_lowerCAmelCase = dist.get_world_size(group=self.process_group )
# gather logic
_lowerCAmelCase = None
if self._is_main():
_lowerCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(lowerCamelCase )]
dist.gather(torch.tensor(lowerCamelCase ) , dst=0 , gather_list=lowerCamelCase , group=self.process_group )
# scatter logic
_lowerCAmelCase = question_hidden_states.shape[0]
_lowerCAmelCase = []
_lowerCAmelCase = []
if self._is_main():
assert len(lowerCamelCase ) == world_size
_lowerCAmelCase , _lowerCAmelCase = self._main_retrieve(torch.cat(lowerCamelCase ).numpy() , lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase )
_lowerCAmelCase = self._chunk_tensor(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = self._chunk_tensor(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = self._scattered(lowerCamelCase , [n_queries, n_docs] , target_type=torch.int64 )
_lowerCAmelCase = self._scattered(lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCamelCase )
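# A wiring sketch (comments only) for the distributed retriever above, assuming
# the original class name RagPyTorchDistributedRetriever from the RAG research
# project. Every rank constructs the retriever; `init_retrieval` creates a side
# "gloo" group (NCCL lacks the CPU gather/scatter used here), and only the main
# rank loads the index, serving the other ranks via scatter:
#
#   retriever = RagPyTorchDistributedRetriever(
#       config, question_encoder_tokenizer=q_tok, generator_tokenizer=g_tok, index=index
#   )
#   retriever.init_retrieval(distributed_port)  # call on every rank after dist.init_process_group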
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""")


def main() -> None:
    message = input("""Encrypted message: """)
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
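# Worked example for decrypt() above: "KHOOR ZRUOG" is "HELLO WORLD" shifted
# forward by 3, so of the 26 candidates printed, the key #3 line reads as
# plain English:
#
#   decrypt("KHOOR ZRUOG")
#   ...
#   Decryption using Key #3: HELLO WORLD
#   ...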
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=19 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Optional[Any]:
__UpperCamelCase :Dict = parent
__UpperCamelCase :Optional[Any] = batch_size
__UpperCamelCase :Any = seq_length
__UpperCamelCase :List[str] = is_training
__UpperCamelCase :Any = use_input_mask
__UpperCamelCase :Optional[int] = use_token_type_ids
__UpperCamelCase :List[str] = use_labels
__UpperCamelCase :Tuple = vocab_size
__UpperCamelCase :List[Any] = hidden_size
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :List[Any] = num_attention_heads
__UpperCamelCase :Dict = intermediate_size
__UpperCamelCase :List[str] = hidden_act
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :Union[str, Any] = attention_probs_dropout_prob
__UpperCamelCase :Optional[Any] = max_position_embeddings
__UpperCamelCase :List[Any] = type_vocab_size
__UpperCamelCase :int = type_sequence_label_size
__UpperCamelCase :str = initializer_range
__UpperCamelCase :Optional[Any] = num_labels
__UpperCamelCase :int = num_choices
__UpperCamelCase :Optional[Any] = scope
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :int = None
if self.use_input_mask:
__UpperCamelCase :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Dict = None
__UpperCamelCase :List[Any] = None
__UpperCamelCase :Tuple = None
if self.use_labels:
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :str = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :int = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :int = EsmForProteinFolding(config=__lowercase).float()
model.to(__lowercase)
model.eval()
__UpperCamelCase :Tuple = model(__lowercase , attention_mask=__lowercase)
__UpperCamelCase :Any = model(__lowercase)
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Dict = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
__UpperCamelCase :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Optional[int] = False
a__ : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else ()
a__ : str = ()
a__ : Tuple = {} if is_torch_available() else {}
a__ : List[Any] = False
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = EsmFoldModelTester(self)
__UpperCamelCase :Dict = ConfigTester(self , config_class=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
@unittest.skip('''Does not support attention outputs''')
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip('''Esm does not support embedding resizing''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''')
def UpperCamelCase__ ( self) -> str:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold only has one output format.''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support input chunking.''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''')
def UpperCamelCase__ ( self) -> int:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCamelCase__ ( self) -> str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''')
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''').float()
model.eval()
__UpperCamelCase :Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
__UpperCamelCase :List[Any] = model(__lowercase)['''positions''']
__UpperCamelCase :Optional[int] = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.float32)
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __lowercase , atol=1E-4))
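# A standalone inference sketch mirroring the integration test above (same
# checkpoint and input IDs); the output shape follows the assertions in the
# model tester further up:
#
#   model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float().eval()
#   input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
#   positions = model(input_ids)["positions"]  # (8, batch, seq_len, 14, 3) atom coordinates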
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : str = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Dict=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None ):
'''simple docstring'''
snake_case_ : Tuple = True
while ask_again:
snake_case_ : Any = input(lowerCamelCase_ )
try:
if default is not None and len(lowerCamelCase_ ) == 0:
return default
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :List[Any]=[] , lowerCamelCase_ :Dict=None , lowerCamelCase_ :Union[str, Any]=0 ):
'''simple docstring'''
snake_case_ : List[str] = BulletMenu(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : int = menu.run(default_choice=lowerCamelCase_ )
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : str = int(lowerCamelCase_ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = int(lowerCamelCase_ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = int(lowerCamelCase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : List[Any] = int(lowerCamelCase_ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : str = int(lowerCamelCase_ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCAmelCase ( lowerCamelCase_ :Dict ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
def a__ ( self :Tuple ,_UpperCamelCase :Any ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Any ):
snake_case_ : List[Any] = super()._format_usage(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[Any] = usage.replace("""<command> [<args>] """ ,"""""" )
return usage
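# Usage sketch for the converters above. The obfuscated dump reuses one function
# name for all of them, so the original names are assumed here
# (_convert_compute_environment, _convert_distributed_mode, _convert_yes_no_to_bool):
#
#   _convert_compute_environment("0")   # -> ComputeEnvironment.LOCAL_MACHINE
#   _convert_distributed_mode("3")      # -> DistributedType.MULTI_GPU
#   _convert_yes_no_to_bool("Yes")      # -> True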
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __UpperCamelCase ( lowercase__ ):
def __init__( self :str ,_UpperCamelCase :Distribution ,_UpperCamelCase :int=None ,_UpperCamelCase :Optional[Any]=None ,_UpperCamelCase :Optional[Any]=0 ):
snake_case_ : Any = 1.0 if scale is None else scale
snake_case_ : Any = 0.0 if loc is None else loc
super().__init__(_UpperCamelCase ,[AffineTransform(loc=self.loc ,scale=self.scale ,event_dim=_UpperCamelCase )] )
@property
def a__ ( self :List[str] ):
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self :Any ):
return self.base_dist.variance * self.scale**2
@property
def a__ ( self :str ):
return self.variance.sqrt()
class __UpperCamelCase ( nn.Module ):
def __init__( self :int ,_UpperCamelCase :int ,_UpperCamelCase :Dict[str, int] ,_UpperCamelCase :Callable[..., Tuple[torch.Tensor]] ,**_UpperCamelCase :Dict ):
super().__init__(**_UpperCamelCase )
snake_case_ : List[str] = args_dim
snake_case_ : int = nn.ModuleList([nn.Linear(_UpperCamelCase ,_UpperCamelCase ) for dim in args_dim.values()] )
snake_case_ : List[str] = domain_map
def a__ ( self :int ,_UpperCamelCase :torch.Tensor ):
snake_case_ : Any = [proj(_UpperCamelCase ) for proj in self.proj]
return self.domain_map(*_UpperCamelCase )
class __UpperCamelCase ( nn.Module ):
def __init__( self :Optional[Any] ,_UpperCamelCase :str ):
super().__init__()
snake_case_ : Dict = function
def a__ ( self :List[Any] ,_UpperCamelCase :List[str] ,*_UpperCamelCase :Any ):
return self.function(_UpperCamelCase ,*_UpperCamelCase )
class __UpperCamelCase :
lowercase : type
lowercase : int
lowercase : Dict[str, int]
def __init__( self :Any ,_UpperCamelCase :int = 1 ):
snake_case_ : Optional[Any] = dim
snake_case_ : Union[str, Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
def a__ ( self :Tuple ,_UpperCamelCase :Dict ):
if self.dim == 1:
return self.distribution_class(*_UpperCamelCase )
else:
return Independent(self.distribution_class(*_UpperCamelCase ) ,1 )
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[torch.Tensor] = None ,_UpperCamelCase :Optional[torch.Tensor] = None ,):
snake_case_ : Dict = self._base_distribution(_UpperCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_UpperCamelCase ,loc=_UpperCamelCase ,scale=_UpperCamelCase ,event_dim=self.event_dim )
@property
def a__ ( self :Any ):
return () if self.dim == 1 else (self.dim,)
@property
def a__ ( self :List[Any] ):
return len(self.event_shape )
@property
def a__ ( self :Dict ):
return 0.0
def a__ ( self :Dict ,_UpperCamelCase :int ):
return ParameterProjection(
in_features=_UpperCamelCase ,args_dim=self.args_dim ,domain_map=LambdaLayer(self.domain_map ) ,)
def a__ ( self :Any ,*_UpperCamelCase :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def a__ ( _UpperCamelCase :torch.Tensor ):
return (x + torch.sqrt(torch.square(_UpperCamelCase ) + 4.0 )) / 2.0
class __UpperCamelCase ( lowercase__ ):
lowercase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
lowercase : type = StudentT
@classmethod
def a__ ( cls :Optional[Any] ,_UpperCamelCase :torch.Tensor ,_UpperCamelCase :torch.Tensor ,_UpperCamelCase :torch.Tensor ):
snake_case_ : int = cls.squareplus(_UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case_ : Optional[int] = 2.0 + cls.squareplus(_UpperCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __UpperCamelCase ( lowercase__ ):
lowercase : Dict[str, int] = {"loc": 1, "scale": 1}
lowercase : type = Normal
@classmethod
def a__ ( cls :Tuple ,_UpperCamelCase :torch.Tensor ,_UpperCamelCase :torch.Tensor ):
snake_case_ : int = cls.squareplus(_UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __UpperCamelCase ( lowercase__ ):
lowercase : Dict[str, int] = {"total_count": 1, "logits": 1}
lowercase : type = NegativeBinomial
@classmethod
def a__ ( cls :Dict ,_UpperCamelCase :torch.Tensor ,_UpperCamelCase :torch.Tensor ):
snake_case_ : Optional[Any] = cls.squareplus(_UpperCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a__ ( self :int ,_UpperCamelCase :Any ):
total_count , logits = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_UpperCamelCase ,logits=_UpperCamelCase )
else:
return Independent(self.distribution_class(total_count=_UpperCamelCase ,logits=_UpperCamelCase ) ,1 )
def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[torch.Tensor] = None ,_UpperCamelCase :Optional[torch.Tensor] = None ):
total_count , logits = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
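# A usage sketch for the distribution-output machinery above, assuming the
# original class/method names (StudentTOutput, get_parameter_projection,
# distribution) and import path. A linear projection maps model features to
# (df, loc, scale); the output head turns them into a torch Distribution.
import torch
from transformers.time_series_utils import StudentTOutput  # import path assumed

dist_output = StudentTOutput(dim=1)
proj = dist_output.get_parameter_projection(32)  # in_features=32
feats = torch.randn(4, 32)
distr_args = proj(feats)                      # tuple (df, loc, scale), each of shape (4,)
distr = dist_output.distribution(distr_args)  # StudentT with batch_shape (4,)
log_p = distr.log_prob(distr.sample())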
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if the sink t is reachable from the source s in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Edmonds-Karp max-flow; returns the saturated edges that form the minimum cut."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
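# How this works: bfs() finds a shortest augmenting path in the residual graph,
# and mincut() (Edmonds-Karp) pushes flow along it until the sink becomes
# unreachable; edges that end up saturated (zero residual) but had capacity in
# the copy `temp` form the minimum cut. For the 6-node test graph the script
# prints [(1, 3), (4, 3), (4, 5)], whose capacities 12 + 7 + 4 = 23 equal the
# maximum flow.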
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
query , key , value = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
layer_id , layer_name = layer.split(""".""" )[0], shave_segments(layer , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
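# A CLI sketch for the conversion script above; the script filename and all
# paths are placeholders, while the flags come from the argparse block:
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted-ldm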
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def test_processor_list_jitted(self):
    batch_size = 4
    sequence_length = 10
    vocab_size = 15
    eos_token_id = 2
    bos_token_id = 1
    max_length = 15

    # dummy input_ids and scores
    input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
    input_ids_comp = input_ids.copy()

    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores_comp = scores.copy()

    # instantiate all dist processors
    temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
    top_k_warp = FlaxTopKLogitsWarper(3)
    top_p_warp = FlaxTopPLogitsWarper(0.8)

    # instantiate all logits processors
    min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
    bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
    eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

    cur_len = 10

    # no processor list
    def run_no_processor_list(input_ids, scores, cur_len):
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        return scores

    # with processor list
    def run_processor_list(input_ids, scores, cur_len):
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores = processor(input_ids, scores, cur_len=cur_len)
        return scores

    jitted_run_no_processor_list = jax.jit(run_no_processor_list)
    jitted_run_processor_list = jax.jit(run_processor_list)

    scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
    scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

    # scores should be equal
    self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

    # input_ids should never be changed
    self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
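# --- Added usage sketch (not part of the original test file) ---
# A minimal standalone illustration of the composition the tests above verify:
# FlaxLogitsProcessorList applies its members in order, so chaining warpers by
# hand and wrapping them in a list are equivalent. The import path below is an
# assumption and depends on a Flax-enabled `transformers` install.
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

processor = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50), FlaxTopPLogitsWarper(0.9)]
)
input_ids = jnp.zeros((1, 5), dtype=jnp.int32)
logits = jnp.ones((1, 100))
warped = processor(input_ids, logits, cur_len=5)  # same shape as `logits`, re-weighted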
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
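# --- Added usage sketch (not part of the original __init__.py) ---
# What the lazy-module pattern above buys the caller: importing the package is
# cheap, and the torch-backed classes are only materialized on first attribute
# access through _LazyModule. Assumes `transformers` and `torch` are installed.
from transformers import IBertConfig, IBertModel

config = IBertConfig()        # default I-BERT configuration
model = IBertModel(config)    # this access triggers the deferred modeling import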
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
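# --- Added usage sketch (not part of the original file) ---
# Exercises gaussian_filter above on a synthetic gradient image so the example
# runs without lena.jpg or an OpenCV display window; only numpy is needed.
from numpy import indices, uint8

synthetic = (indices((64, 64))[1] * 4).astype(uint8)  # 64x64 horizontal gradient
smoothed = gaussian_filter(synthetic, 3, sigma=1)
print(smoothed.shape)  # (62, 62): the valid convolution shrinks each side by k_size - 1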
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
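# --- Added usage sketch (not part of the original file) ---
# Illustrates the derived defaults in RwkvConfig above; assumes the class is
# importable in a normal install (e.g. `from transformers import RwkvConfig`).
config = RwkvConfig(hidden_size=2048, num_hidden_layers=24)
print(config.attention_hidden_size)    # 2048: falls back to hidden_size
print(config.intermediate_size)        # 8192: falls back to 4 * hidden_size
print(config.max_position_embeddings)  # 1024: aliased to context_length via attribute_map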