UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
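# Illustrative usage (added example, not part of the original file): with
# n = 2 mol and T = 100 K in V = 5 m^3, P = nRT/V, and feeding that pressure
# back into the volume formula recovers V.
if __name__ == "__main__":
    _p = pressure_of_gas_system(2.0, 100.0, 5.0)
    assert abs(volume_of_gas_system(2.0, 100.0, _p) - 5.0) < 1e-9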
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) as the integral of x**(num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
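# Illustrative check (added example, not part of the original file): for a
# positive integer n, Gamma(n) == (n - 1)!, so the numerical integral should
# be very close to 4! for n = 5.
if __name__ == "__main__":
    assert abs(gamma(5) - math.factorial(4)) < 1e-4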
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
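# Illustrative usage (added note, not part of the original file): this module
# backs the `accelerate config` CLI entry point, so the same interactive flow
# can be started from a shell with:
#   accelerate config --config_file /tmp/default_config.yaml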
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that the installed version of a package satisfies the given pip-style requirement string."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-namespaced hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
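# Illustrative usage (added example, not part of the original file): both the
# plain and range forms parse with the regexes above; `packaging` is already
# imported by this module, so it is guaranteed to be installed, and the
# version bound assumes any reasonably modern environment.
if __name__ == "__main__":
    require_version("packaging")
    require_version("packaging>=20.0")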
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
a_ = """http://www.mocksite.com/file1.txt"""
a_ = """\"text\": [\"foo\", \"foo\"]"""
a_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def __lowerCAmelCase ( A_ : Any , A_ : Any , A_ : Optional[Any] ) -> List[Any]:
import requests
monkeypatch.setattr(A_ , "request" , A_ )
__UpperCAmelCase = URL
if issubclass(A_ , A_ ):
__UpperCAmelCase = url
elif issubclass(A_ , A_ ):
__UpperCAmelCase = [url]
elif issubclass(A_ , A_ ):
__UpperCAmelCase = {"train": url}
__UpperCAmelCase = "dummy"
__UpperCAmelCase = "downloads"
__UpperCAmelCase = tmp_path
__UpperCAmelCase = DownloadConfig(
cache_dir=os.path.join(A_ , A_ ) , use_etag=A_ , )
__UpperCAmelCase = DownloadManager(dataset_name=A_ , download_config=A_ )
__UpperCAmelCase = dl_manager.download(A_ )
__UpperCAmelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A_ , A_ ):
__UpperCAmelCase = [downloaded_paths]
__UpperCAmelCase = [urls]
elif isinstance(A_ , A_ ):
assert "train" in downloaded_paths.keys()
__UpperCAmelCase = downloaded_paths.values()
__UpperCAmelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A_ , A_ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__UpperCAmelCase = Path(A_ )
__UpperCAmelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__UpperCAmelCase = downloaded_path.read_text()
assert content == CONTENT
__UpperCAmelCase = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
__UpperCAmelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def __lowerCAmelCase ( A_ : int , A_ : List[Any] , A_ : str ) -> Optional[Any]:
__UpperCAmelCase = str(A_ )
if issubclass(A_ , A_ ):
__UpperCAmelCase = filename
elif issubclass(A_ , A_ ):
__UpperCAmelCase = [filename]
elif issubclass(A_ , A_ ):
__UpperCAmelCase = {"train": filename}
__UpperCAmelCase = "dummy"
__UpperCAmelCase = xz_file.parent
__UpperCAmelCase = "extracted"
__UpperCAmelCase = DownloadConfig(
cache_dir=A_ , use_etag=A_ , )
__UpperCAmelCase = DownloadManager(dataset_name=A_ , download_config=A_ )
__UpperCAmelCase = dl_manager.extract(A_ )
__UpperCAmelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(A_ , A_ ):
__UpperCAmelCase = [extracted_paths]
__UpperCAmelCase = [paths]
elif isinstance(A_ , A_ ):
assert "train" in extracted_paths.keys()
__UpperCAmelCase = extracted_paths.values()
__UpperCAmelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A_ , A_ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__UpperCAmelCase = Path(A_ )
__UpperCAmelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A_ , etag=A_ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__UpperCAmelCase = extracted_path.read_text()
__UpperCAmelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to make every node hold exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
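# Illustrative usage (added example, not part of the original file): three
# coins at the root of a three-node tree take two moves, one to each child.
if __name__ == "__main__":
    assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2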
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first nth_term terms of the p-series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ALBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized
        # sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
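# Illustrative usage (added note, not part of the original file; assumes
# network access to download the published checkpoint):
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tok("hello world")["input_ids"]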
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational Autoencoder (VAE) model with KL loss."""

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled VAE encoding/decoding to process large images in tiles."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled VAE encoding/decoding."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Enable sliced VAE decoding to decode the batch one image at a time."""
        self.use_slicing = True

    def disable_slicing(self):
        """Disable sliced VAE decoding."""
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor):
        """Set the attention processor to use, either one processor for all layers or a dict per layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset attention processors to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode a batch of images using tiled encoding, blending overlapping tiles to avoid seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        """Decode a batch of latents using tiled decoding, blending overlapping tiles to avoid seams."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator=None):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
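# Illustrative usage (added example, not part of the original file): round-trip
# a random image through a tiny, untrained instance; only the tensor shapes
# are meaningful here, and the small config is an assumption for the demo.
if __name__ == "__main__":
    vae = AutoencoderKL(block_out_channels=(32,), norm_num_groups=32)
    x = torch.randn(1, 3, 32, 32)
    posterior = vae.encode(x).latent_dist
    reconstruction = vae.decode(posterior.sample()).sample
    assert reconstruction.shape == x.shape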
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DeiT model."""

    model_type = "deit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
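# Illustrative usage (added example, not part of the original file): the
# defaults above mirror the deit-base layout.
if __name__ == "__main__":
    config = DeiTConfig()
    assert config.hidden_size == 768 and config.model_type == "deit"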
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize=True, size=None, size_divisor=32, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width after BridgeTowerImageProcessor resizing."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case : int = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case : Optional[int] = image_processing(__lowerCAmelCase , return_tensors="pt" ).pixel_values
snake_case : str = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
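# A minimal caller-side sketch of the processor these tests exercise (hedged: assumes
# transformers' public BridgeTowerImageProcessor API and a local image file, neither of
# which this test harness pins down):
# from transformers import BridgeTowerImageProcessor
# from PIL import Image
# processor = BridgeTowerImageProcessor()
# pixel_values = processor(Image.open("example.jpg"), return_tensors="pt").pixel_values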
| 178
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def snake_case ( voltage : float , current : float , power : float ) -> tuple:
    """simple docstring"""
    result = namedtuple("""result""" , """name value""" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("""Only one argument must be 0""" )
    elif power < 0:
        raise ValueError(
            """Power cannot be negative in any electrical/electronics system""" )
    elif voltage == 0:
        return result("""voltage""" , power / current )
    elif current == 0:
        return result("""current""" , power / voltage )
    elif power == 0:
        return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
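# Quick usage sketch of the function above (keeping this file's obfuscated name
# `snake_case`): exactly one of the three arguments must be 0, and the function
# solves for it via P = V * I.
# snake_case(voltage=0, current=2, power=4)   # -> result(name='voltage', value=2.0)
# snake_case(voltage=2, current=4, power=0)   # -> result(name='power', value=8.0)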
| 208
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_lowercase)
class UpperCAmelCase_ ( _lowercase):
def __init__( self : int , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] ) -> Any:
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : List[str]=None ) -> Dict:
_UpperCamelCase = {}
_UpperCamelCase = {}
if prompt is not None:
_UpperCamelCase = prompt
if generate_kwargs is not None:
_UpperCamelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCamelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
_UpperCamelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , __UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__UpperCamelCase : Tuple ) -> Dict:
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : str=None ) -> Union[str, Any]:
_UpperCamelCase = load_image(__UpperCamelCase )
if prompt is not None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
F'''Received an invalid text input, got - {type(__UpperCamelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
_UpperCamelCase = self.model.config.model_type
if model_type == "git":
_UpperCamelCase = self.image_processor(images=__UpperCamelCase , return_tensors=self.framework )
_UpperCamelCase = self.tokenizer(text=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids
_UpperCamelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCamelCase = torch.tensor(__UpperCamelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
_UpperCamelCase = self.image_processor(images=__UpperCamelCase , header_text=__UpperCamelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCamelCase = self.image_processor(images=__UpperCamelCase , return_tensors=self.framework )
_UpperCamelCase = self.tokenizer(__UpperCamelCase , return_tensors=self.framework )
model_inputs.update(__UpperCamelCase )
else:
raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
else:
_UpperCamelCase = self.image_processor(images=__UpperCamelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_UpperCamelCase = None
return model_inputs
def _UpperCamelCase ( self : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=None ) -> Tuple:
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __UpperCamelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
_UpperCamelCase = None
if generate_kwargs is None:
_UpperCamelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCamelCase = model_inputs.pop(self.model.main_input_name )
_UpperCamelCase = self.model.generate(__UpperCamelCase , **__UpperCamelCase , **__UpperCamelCase )
return model_outputs
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Any ) -> Union[str, Any]:
_UpperCamelCase = []
for output_ids in model_outputs:
_UpperCamelCase = {
'''generated_text''': self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
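# A minimal end-user sketch of this pipeline (hedged: the checkpoint name below is one
# commonly used public example, not something this file specifies):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
# captioner("photo.jpg")                              # -> [{"generated_text": "..."}]
# captioner("photo.jpg", prompt="A photography of")   # conditional generation, where supported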
| 342
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase ( ) -> Union[str, Any]:
_UpperCamelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
_UpperCamelCase = bs[:]
_UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(a__ )
cs.append(2**8 + n )
n += 1
_UpperCamelCase = [chr(a__ ) for n in cs]
return dict(zip(a__ , a__ ) )
def lowercase ( a__ : Any ) -> Union[str, Any]:
_UpperCamelCase = set()
_UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase = char
return pairs
class UpperCAmelCase_ ( _lowercase):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str]="replace" , __UpperCamelCase : Any="<s>" , __UpperCamelCase : List[str]="</s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Any="<s>" , __UpperCamelCase : Tuple="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : List[Any]=False , **__UpperCamelCase : Optional[int] , ) -> Optional[Any]:
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase = json.load(__UpperCamelCase )
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = errors # how to handle errors in decoding
_UpperCamelCase = bytes_to_unicode()
_UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
_UpperCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCamelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCamelCase = {}
_UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _UpperCamelCase ( self : Dict ) -> List[Any]:
return len(self.encoder )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase ( self : int , __UpperCamelCase : int ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
_UpperCamelCase = tuple(__UpperCamelCase )
_UpperCamelCase = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
_UpperCamelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCamelCase = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCamelCase = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCamelCase = tuple(__UpperCamelCase )
_UpperCamelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCamelCase = get_pairs(__UpperCamelCase )
_UpperCamelCase = ''' '''.join(__UpperCamelCase )
_UpperCamelCase = word
return word
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : List[str] ) -> Optional[int]:
_UpperCamelCase = []
for token in re.findall(self.pat , __UpperCamelCase ):
_UpperCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(''' ''' ) )
return bpe_tokens
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[Any] ) -> Optional[Any]:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
return self.decoder.get(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] ) -> Any:
_UpperCamelCase = ''''''.join(__UpperCamelCase )
_UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' )
_UpperCamelCase = 0
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_UpperCamelCase = token_index
writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , __UpperCamelCase : Any , __UpperCamelCase : Tuple=False , **__UpperCamelCase : Optional[int] ) -> Any:
_UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
_UpperCamelCase = ''' ''' + text
return (text, kwargs)
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , ) -> dict:
_UpperCamelCase = super()._pad(
encoded_inputs=__UpperCamelCase , max_length=__UpperCamelCase , padding_strategy=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
_UpperCamelCase = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCamelCase = len(encoded_inputs['''global_attention_mask'''] ) != len(__UpperCamelCase )
if needs_to_be_padded:
_UpperCamelCase = len(__UpperCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCamelCase = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCamelCase = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
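# A worked illustration of the `global_attention_mask` padding rule implemented in
# `_pad` above (hand-computed; the ids are illustrative, no tokenizer instance needed):
# with right padding, the mask is extended with -1, because 0 already means
# "local attention" rather than "not attended".
# encoded = {"input_ids": [0, 31414, 2], "global_attention_mask": [1, 0, 0]}
# after padding input_ids to length 5:
# encoded["global_attention_mask"] == [1, 0, 0, -1, -1]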
| 342
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCamelCase__ : str = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : List[str] ):
"""simple docstring"""
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 578
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowerCamelCase : List[str] = logging.get_logger(__name__)
def A__ ( _a : np.ndarray , _a : Union[int, Iterable[int]] , _a : bool , _a : int ):
'''simple docstring'''
def constraint_to_multiple_of(_a : List[str] , _a : int , _a : Tuple=0 , _a : Any=None ):
snake_case__ : Optional[int] =round(val / multiple ) * multiple
if max_val is not None and x > max_val:
snake_case__ : Any =math.floor(val / multiple ) * multiple
if x < min_val:
snake_case__ : Union[str, Any] =math.ceil(val / multiple ) * multiple
return x
snake_case__ : Optional[Any] =(output_size, output_size) if isinstance(_a , _a ) else output_size
snake_case__ , snake_case__ : Any =get_image_size(_a )
snake_case__ , snake_case__ : Optional[Any] =output_size
# determine new height and width
snake_case__ : int =output_height / input_height
snake_case__ : List[str] =output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
snake_case__ : List[str] =scale_width
else:
# fit height
snake_case__ : List[Any] =scale_height
snake_case__ : Any =constraint_to_multiple_of(scale_height * input_height , multiple=_a )
snake_case__ : Dict =constraint_to_multiple_of(scale_width * input_width , multiple=_a )
return (new_height, new_width)
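# A worked example of the helper above (hand-computed): for a 480x640 (height x width)
# image with output_size=384, keep_aspect_ratio=True and multiple=32, the scales are
# 384/480 = 0.8 (height) and 384/640 = 0.6 (width); 0.8 is closer to 1, so "fit height"
# wins and both sides are scaled by 0.8, giving (384, 512) -- both already multiples of 32.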
class _lowercase ( _A ):
_a : Tuple = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = False , a = 1 , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , **a , ):
super().__init__(**a )
snake_case__ : Dict =size if size is not None else {"""height""": 3_8_4, """width""": 3_8_4}
snake_case__ : Union[str, Any] =get_size_dict(a )
snake_case__ : str =do_resize
snake_case__ : Union[str, Any] =size
snake_case__ : Any =keep_aspect_ratio
snake_case__ : List[Any] =ensure_multiple_of
snake_case__ : List[str] =resample
snake_case__ : Dict =do_rescale
snake_case__ : str =rescale_factor
snake_case__ : Any =do_normalize
snake_case__ : List[Any] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , a , a , a = False , a = 1 , a = PILImageResampling.BICUBIC , a = None , **a , ):
snake_case__ : Dict =get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
snake_case__ : Union[str, Any] =get_resize_output_image_size(
a , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
def lowercase__ ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def lowercase__ ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def lowercase__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
snake_case__ : Tuple =do_resize if do_resize is not None else self.do_resize
snake_case__ : Any =size if size is not None else self.size
snake_case__ : int =get_size_dict(a )
snake_case__ : Optional[int] =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
snake_case__ : Tuple =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
snake_case__ : Any =resample if resample is not None else self.resample
snake_case__ : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Dict =rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : str =do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] =image_mean if image_mean is not None else self.image_mean
snake_case__ : Optional[Any] =image_std if image_std is not None else self.image_std
snake_case__ : List[str] =make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] =[to_numpy_array(a ) for image in images]
if do_resize:
snake_case__ : int =[self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
snake_case__ : Optional[int] =[self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
snake_case__ : str =[self.normalize(image=a , mean=a , std=a ) for image in images]
snake_case__ : List[Any] =[to_channel_dimension_format(a , a ) for image in images]
snake_case__ : Tuple ={"""pixel_values""": images}
return BatchFeature(data=a , tensor_type=a )
def lowercase__ ( self , a , a = None ):
snake_case__ : Dict =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(a ):
snake_case__ : Any =target_sizes.numpy()
snake_case__ : Union[str, Any] =[]
for idx in range(len(a ) ):
snake_case__ : int =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=a )
snake_case__ : Optional[Any] =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
snake_case__ : Union[str, Any] =logits.argmax(dim=1 )
snake_case__ : Any =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 385
| 0
|
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->Optional[int]:
"""simple docstring"""
a_ = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
a_ , a_ = input_paths_and_base_extractors[compression_format]
if input_path is None:
a_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCAmelCase )
assert base_extractor.is_extractable(UpperCAmelCase )
a_ = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(UpperCAmelCase , UpperCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
a_ = file_path.read_text(encoding="utf-8" )
else:
a_ = output_path.read_text(encoding="utf-8" )
a_ = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->Optional[int]:
"""simple docstring"""
a_ = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
a_ = input_paths[compression_format]
if input_path is None:
a_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCAmelCase )
a_ = Extractor.infer_extractor_format(UpperCAmelCase )
assert extractor_format is not None
a_ = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
a_ = file_path.read_text(encoding="utf-8" )
else:
a_ = output_path.read_text(encoding="utf-8" )
a_ = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
import tarfile
a_ = tmp_path / "data_dot_dot"
directory.mkdir()
a_ = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(UpperCAmelCase , "w" ) as f:
f.add(UpperCAmelCase , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def UpperCamelCase ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
import tarfile
a_ = tmp_path / "data_sym_link"
directory.mkdir()
a_ = directory / "tar_file_with_sym_link.tar"
os.symlink(".." , directory / "subdir" , target_is_directory=UpperCAmelCase )
with tarfile.TarFile(UpperCAmelCase , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
a_ = insecure_tar_files[insecure_tar_file]
a_ = tmp_path / "extracted"
TarExtractor.extract(UpperCAmelCase , UpperCAmelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
a_ = (
B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(UpperCAmelCase )
assert zipfile.is_zipfile(str(UpperCAmelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(UpperCAmelCase ) # but we're right
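# A minimal sketch of the API exercised by these tests (hedged: `datasets.utils.extract`
# is an internal module, so this mirrors the tests above rather than a stable public API):
# from datasets.utils.extract import Extractor
# fmt = Extractor.infer_extractor_format("archive.tar.gz")
# Extractor.extract("archive.tar.gz", "out_dir", fmt)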
| 210
|
"""simple docstring"""
from math import sqrt
def solution ( limit : int = 1_000_000 ) ->int:
    """simple docstring"""
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
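# A hand check of the counting rule above: for max_cuboid_size = 6 and
# sum_shortest_sides = 8, sqrt(8**2 + 6**2) = 10 is an integer, and the increment
# min(6, 8 // 2) - max(1, 8 - 6) + 1 = 4 - 2 + 1 = 3 counts the cuboids
# (6, 6, 2), (6, 5, 3) and (6, 4, 4) that share this integer shortest path.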
| 210
| 1
|
'''simple docstring'''
def alternative_string_arrange ( first_str : str , second_str : str ) -> str:
    """simple docstring"""
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    output_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(output_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
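# Worked example (matching the __main__ call above): characters are taken alternately
# and the tail of the longer string is appended as-is.
# alternative_string_arrange("AB", "XYZ")  # -> "AXBYZ"
# alternative_string_arrange("ABC", "")    # -> "ABC"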
| 591
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCamelCase__ : Optional[Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = bnb_quantization_config.load_in_abit
_SCREAMING_SNAKE_CASE = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
_SCREAMING_SNAKE_CASE = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(device_map.keys() ) > 1:
_SCREAMING_SNAKE_CASE = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_SCREAMING_SNAKE_CASE = get_keys_to_not_convert(SCREAMING_SNAKE_CASE_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE_ )
# compatibility with peft
_SCREAMING_SNAKE_CASE = load_in_abit
_SCREAMING_SNAKE_CASE = load_in_abit
_SCREAMING_SNAKE_CASE = get_parameter_device(SCREAMING_SNAKE_CASE_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
_SCREAMING_SNAKE_CASE = replace_with_bnb_layers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
# convert param to the right dtype
_SCREAMING_SNAKE_CASE = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_SCREAMING_SNAKE_CASE = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
_SCREAMING_SNAKE_CASE = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
param.to(SCREAMING_SNAKE_CASE_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
            logger.info(
                F"The model device type is {model_device.type}. However, cuda is needed for quantization. "
                """We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
_SCREAMING_SNAKE_CASE = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_memory=SCREAMING_SNAKE_CASE_ , no_split_module_classes=SCREAMING_SNAKE_CASE_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE_ , offload_state_dict=SCREAMING_SNAKE_CASE_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE_ , device_map=SCREAMING_SNAKE_CASE_ , offload_dir=SCREAMING_SNAKE_CASE_ )
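# A minimal caller-side sketch (hedged: uses accelerate's documented public names,
# which this obfuscated copy renders differently):
# from accelerate.utils import BnbQuantizationConfig
# from accelerate.utils.bnb import load_and_quantize_model
# cfg = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# model = load_and_quantize_model(model, bnb_quantization_config=cfg, device_map="auto")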
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> List[str]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
_SCREAMING_SNAKE_CASE = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
_SCREAMING_SNAKE_CASE = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = special_dtypes
_SCREAMING_SNAKE_CASE = no_split_module_classes
_SCREAMING_SNAKE_CASE = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_SCREAMING_SNAKE_CASE = get_balanced_memory(
SCREAMING_SNAKE_CASE_ , low_zero=(device_map == """balanced_low_0""") , max_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
_SCREAMING_SNAKE_CASE = max_memory
_SCREAMING_SNAKE_CASE = infer_auto_device_map(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# check if don't have any quantized module on the cpu
_SCREAMING_SNAKE_CASE = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_SCREAMING_SNAKE_CASE = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
                    logger.info(
                        """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = False
for name, module in model.named_children():
if current_key_name is None:
_SCREAMING_SNAKE_CASE = []
current_key_name.append(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_SCREAMING_SNAKE_CASE = """.""".join(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_SCREAMING_SNAKE_CASE = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
_SCREAMING_SNAKE_CASE = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_SCREAMING_SNAKE_CASE = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
_SCREAMING_SNAKE_CASE = module.weight.data
if module.bias is not None:
_SCREAMING_SNAKE_CASE = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = True
if len(list(module.children() ) ) > 0:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
# Create a copy of the model
with init_empty_weights():
_SCREAMING_SNAKE_CASE = deepcopy(SCREAMING_SNAKE_CASE_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_SCREAMING_SNAKE_CASE = find_tied_parameters(SCREAMING_SNAKE_CASE_ )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_SCREAMING_SNAKE_CASE = sum(SCREAMING_SNAKE_CASE_ , [] )
_SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) > 0
# Check if it is a base model
_SCREAMING_SNAKE_CASE = False
if hasattr(SCREAMING_SNAKE_CASE_ , """base_model_prefix""" ):
_SCREAMING_SNAKE_CASE = not hasattr(SCREAMING_SNAKE_CASE_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_SCREAMING_SNAKE_CASE = list(model.named_children() )
_SCREAMING_SNAKE_CASE = [list_modules[-1][0]]
# add last module together with tied weights
_SCREAMING_SNAKE_CASE = set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = list(set(SCREAMING_SNAKE_CASE_ ) ) + list(SCREAMING_SNAKE_CASE_ )
# remove ".weight" from the keys
_SCREAMING_SNAKE_CASE = [""".weight""", """.bias"""]
_SCREAMING_SNAKE_CASE = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_SCREAMING_SNAKE_CASE = name.replace(SCREAMING_SNAKE_CASE_ , """""" )
filtered_module_names.append(SCREAMING_SNAKE_CASE_ )
return filtered_module_names
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , bnb.nn.Linearabit ):
return True
return False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
return next(parameter.parameters() ).device
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 , dtype=SCREAMING_SNAKE_CASE_ , value=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = param_name
_SCREAMING_SNAKE_CASE = model
if "." in tensor_name:
_SCREAMING_SNAKE_CASE = tensor_name.split(""".""" )
for split in splits[:-1]:
_SCREAMING_SNAKE_CASE = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_SCREAMING_SNAKE_CASE = new_module
_SCREAMING_SNAKE_CASE = splits[-1]
# offload weights
_SCREAMING_SNAKE_CASE = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , )
else:
offload_weight(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
offload_weight(SCREAMING_SNAKE_CASE_ , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """meta""" , dtype=SCREAMING_SNAKE_CASE_ , value=torch.empty(*param.size() ) )
| 591
| 1
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowercase__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowercase__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowercase__ )
return parser.parse_args()
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE : Optional[int] = script_fpath.stem
SCREAMING_SNAKE_CASE : Tuple = importlib.import_module(lowercase__ )
# Patch sys.argv
SCREAMING_SNAKE_CASE : int = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
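# Typical invocation of this launcher (the script name here is illustrative):
# python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5
# Internally it imports train_script as a module, rewrites sys.argv to append
# --tpu_num_cores, and spawns `_mp_fn` on each TPU core via xmp.spawn.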
| 719
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__UpperCAmelCase = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
__UpperCAmelCase = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
__UpperCAmelCase = """▁"""
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = BigBirdTokenizer
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE__ = []
def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="</s>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Tuple="[SEP]" , lowerCamelCase_ : Dict="[MASK]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[Any] = vocab_file
SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Tuple = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
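# A minimal usage sketch (hedged: assumes the class is exposed as
# transformers.BigBirdTokenizerFast, as the slow/fast tokenizer pairing suggests):
# from transformers import BigBirdTokenizerFast
# tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
# tok("hello world")["input_ids"]  # [CLS] ... [SEP], per build_inputs_with_special_tokens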
| 79
| 0
|
def solution ( n = 10 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    number = 28_433 * (pow(2 , 7_830_457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
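# Sanity check (hand-computed): pow(2, 7_830_457, 10) == 2, since 2**k mod 10 cycles
# with period 4 for k >= 1 and 7_830_457 % 4 == 1; so 28_433 * 2 + 1 = 56_867 and
# solution(1) == "7".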
| 192
|
import functools
def _lowerCamelCase ( days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
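# Worked example (the classic LeetCode 983 instance, using this file's obfuscated
# function name): for days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] the optimum
# is 11 -- a 7-day pass bought on day 1 (7), plus 1-day tickets on days 8 and 20 (2 + 2).
# _lowerCamelCase([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11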
| 192
| 1
|
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin ( theta , accuracy = 3_0 ):
    """simple docstring"""
    if not isinstance(theta , (int, float) ):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos ( theta , accuracy = 3_0 ):
    """simple docstring"""
    if not isinstance(theta , (int, float) ):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
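# Editor's note: after the range reduction into [0, 2*pi) the truncated series
# converges extremely fast, so it can be cross-checked against the math module.
from math import cos, sin

assert abs(maclaurin_sin(10, 30) - sin(10)) < 1e-9
assert abs(maclaurin_cos(5, 30) - cos(5)) < 1e-9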
| 701
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a snippet, or None if it has fewer than MIN_NUM_TOKENS tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')

    return ds_filter, duplicate_clusters
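# A minimal usage sketch, assuming a `datasets` dataset with `content`,
# `repo_name` and `path` columns; the dataset name below is only an illustration,
# not a requirement of the code above.
from datasets import load_dataset

ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")  # illustrative dataset choice
ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
print(len(ds), "->", len(ds_dedup))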
| 48
| 0
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
A__ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 337
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base')

        features = {
            'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 422
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.'
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
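# A short usage sketch for the class above; the checkpoint name is taken from the
# pretrained map defined earlier in this file.
tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
enc = tokenizer("Hello world", "second segment")
print(enc["input_ids"])  # [CLS] ... [SEP] ... [SEP] layout from build_inputs_with_special_tokens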
| 637
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
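# A minimal sketch of how these schedules are used in training code, mirroring
# the expectations in the test above (the tiny linear layer is a placeholder).
opt = AdamW(nn.Linear(10, 10).parameters(), lr=1e-3)
sched = get_linear_schedule_with_warmup(opt, num_warmup_steps=2, num_training_steps=10)
for _ in range(10):
    opt.step()       # in real code this follows loss.backward()
    sched.step()     # advance the learning-rate schedule once per step
print(sched.get_last_lr())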
| 637
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 74
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
UpperCAmelCase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens = []
    suffix_tokens = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens])

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset special tokens to the target-lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
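# A brief translation-preprocessing sketch using the tokenizer above; the
# checkpoint comes from the pretrained map at the top of this file.
tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tokenizer("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
# Suffix layout from set_src_lang_special_tokens: [... tokens ..., </s>, en_XX]
print(batch["input_ids"])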
| 567
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the dotted module path of a `tests/models/...` test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")

    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()

    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
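# Illustrative usage only: map each model class in the BERT test file to its
# tester classes (the path assumes the standard `tests/models/...` layout).
test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
print(to_json(get_model_to_tester_mapping(test_file)))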
| 714
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Depth-first search over the 8 neighbours of cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
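# Usage example for the class above, using the classic 5x5 grid; with
# 8-directional connectivity it contains 5 islands.
graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(len(graph), len(graph[0]), graph)
print(g.count_islands())  # -> 5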
| 45
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__lowerCAmelCase : Dict =r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
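# An illustrative sketch of composing a RagConfig from two sub-model configs;
# the two checkpoint names are common public choices, not requirements.
from transformers import AutoConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)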
| 359
|
from copy import deepcopy
class BinaryIndexedTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
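# Usage example for the class above: point updates and prefix/range sums in
# O(log n) per operation.
bit = BinaryIndexedTree([1, 2, 3, 4, 5])
print(bit.prefix(3))    # 1 + 2 + 3 = 6
bit.add(1, 10)          # array becomes [1, 12, 3, 4, 5]
print(bit.query(1, 4))  # 12 + 3 + 4 = 19
print(bit.get_array())  # [1, 12, 3, 4, 5]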
| 162
| 0
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein distance between two words via memoized top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
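# Usage example for the function above, on two classic edit-distance pairs.
print(min_distance_up_bottom("kitten", "sitting"))        # -> 3
print(min_distance_up_bottom("intention", "execution"))   # -> 5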
| 376
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCamelCase_ = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 376
| 1
|
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count set bits using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
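# Usage example: each call loops only once per set bit, not once per bit position.
print(get_set_bits_count(25))   # 0b11001 -> 3
print(get_set_bits_count(0))    # -> 0
print(get_set_bits_count(255))  # -> 8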
| 553
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase :Union[str, Any] = SpeechTaTokenizer(UpperCAmelCase )
lowerCAmelCase :Any = AddedToken('<mask>' , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase )
lowerCAmelCase :List[Any] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self) -> None:
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self) -> None:
        pass

    def test_subword_regularization_tokenizer(self) -> None:
        pass
    def test_full_tokenizer(self) -> None:
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self) -> None:
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    """Load the standard COCO test image used for conversion sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
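
# Illustrative example of the renaming (hypothetical checkpoint key, not taken
# from a real state dict): "network.0.1.pwconv1.weight" first becomes
# "network.0.1.point_wise_conv1.weight" and, because ls[2] is a digit, ends up
# as "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight".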
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """True when ``source`` deviates from ``target`` by strictly less than 1%."""
    return (abs(source - target) / target) < 0.01
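
# Quick illustration (not from the original file):
#     >>> is_1percent_close(100.5, 100)
#     True
#     >>> is_1percent_close(102, 100)
#     False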
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] ,download_size=3940680 ,dataset_size=2589981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = (
            getattr(dataset_infos["default"], key),
            getattr(expected_dataset_infos["default"], key),
        )
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self) -> None:
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self) -> None:
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
    def test_add_bos_token_slow(self) -> None:
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self) -> None:
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self) -> None:
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self) -> None:
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class A_ :
'''simple docstring'''
__snake_case = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__snake_case = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class A_ :
'''simple docstring'''
__snake_case = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__snake_case = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
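    # Quick sanity check (illustrative, not from the original file):
    # |1 - 3| + |1 - 4| = 5 for both implementations.
    assert manhattan_distance([1, 1], [3, 4]) == 5.0
    assert manhattan_distance_one_liner([1, 1], [3, 4]) == 5.0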
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowercase( __a : str = "dhaka" , __a : int = 5 ):
a__ =min(__a , 50 ) # Prevent abuse!
a__ ={
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
a__ =requests.get('https://www.google.com/search' , params=__a , headers=__a )
a__ =BeautifulSoup(html.text , 'html.parser' )
a__ =''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
a__ =json.dumps(__a )
a__ =json.loads(__a )
a__ =re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , __a , )
if not matched_google_image_data:
return 0
a__ =re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(__a ) , )
a__ =re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , __a , )
for index, fixed_full_res_image in enumerate(__a ):
if index >= max_images:
return index
a__ =bytes(__a , 'ascii' ).decode(
'unicode-escape' )
a__ =bytes(__a , 'ascii' ).decode(
'unicode-escape' )
a__ =urllib.request.build_opener()
a__ =[
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(__a )
a__ =f"""query_{query.replace(' ' , '_' )}"""
if not os.path.exists(__a ):
os.makedirs(__a )
urllib.request.urlretrieve( # noqa: S310
__a , f"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(metadata={'help': 'Which column contains the label'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the training file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} )
_lowercase = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowercase = field(
default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental point transformation applied to every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
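
# A small self-check of the point mapping (illustrative, not from the original
# file): mid-grey 128 is a fixed point of the transformation for any level, and
# positive levels push other values further away from 128.
def _contrast_demo(level: int, c: int) -> int:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    return int(128 + factor * (c - 128))


assert _contrast_demo(170, 128) == 128  # mid-grey is unchanged
assert _contrast_demo(170, 200) > 200  # bright pixels get brighter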
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
SCREAMING_SNAKE_CASE : str = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( a_ ):
@property
def _UpperCAmelCase ( self : List[str] ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
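# Illustrative usage (not part of the original module; the `OnnxConfig(config)`
# constructor call follows the usual `transformers.onnx` pattern and should be
# treated as an assumption):
# config = XLMRobertaXLConfig(num_hidden_layers=2)  # a small variant for testing
# onnx_config = XLMRobertaXLOnnxConfig(config)
# print(onnx_config.inputs)
# -> OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                 ('attention_mask', {0: 'batch', 1: 'sequence'})])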
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
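# Illustrative note (not part of the original module): each unit of an xpath such
# as /html/body/div[1] is encoded as a (tag id, subscript) pair, so the defaults
# above allow 256 distinct tags and subscripts below 1024, for at most
# max_depth=50 units, each embedded into xpath_unit_hidden_size=32 dimensions.
# config = MarkupLMConfig()  # instantiates the markuplm-base defaults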
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy and their
    difference (in bits) for the given text."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Convert text input into two dicts of counts: the frequencies of single
    characters and of two-character strings."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
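# Worked example (illustrative, not part of the original module): for text drawn
# uniformly from the 27-symbol alphabet (space + a-z), the first-order entropy
# approaches log2(27) ~= 4.75 bits per character; English prose sits lower
# (roughly 4.0-4.2), and the second-order figure drops further because adjacent
# characters are not independent. Try it with:
# calculate_prob("the quick brown fox jumps over the lazy dog " * 100)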
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _UpperCamelCase ( self : Any ) -> str:
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Any ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] ) -> List[str]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : int ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = CLIPConfig()
            # Create a dummy config file with image_processor_type
_UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
_UpperCamelCase = CLIPImageProcessor(**__UpperCamelCase )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
config.save_pretrained(__UpperCamelCase )
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
# make sure private variable is not incorrectly saved
_UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
with self.assertRaisesRegex(
__UpperCamelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''clip-base''' )
def _UpperCamelCase ( self : Dict ) -> Union[str, Any]:
with self.assertRaisesRegex(
__UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
with self.assertRaisesRegex(
__UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def _UpperCamelCase ( self : int ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCamelCase ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
_UpperCamelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
_UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
# If remote code is not set, the default is to use local
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
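# Numerical note (illustrative, not part of the original module): for a Hermitian
# matrix the Rayleigh quotient is real and bounded by the extreme eigenvalues,
# lambda_min <= R(A, v) <= lambda_max. A quick check against the second test
# matrix above:
# w = np.linalg.eigvalsh(np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]))
# assert w.min() <= 3 <= w.max()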
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the CNN/Daily Mail dataset: one story per file, with summary
    sentences marked by lines that start with `@highlight`."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Splits a raw story file into (story_lines, summary_lines)."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if too long,
    right-pad with the pad token if too short."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode story and summary lines and flatten them into single token id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings: the id alternates between 0 and 1 at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
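# Worked example (illustrative, not part of the original module): with a
# separator token id of 101, the segment id flips at each separator occurrence:
# compute_token_type_ids([[101, 5, 6, 101, 7]], 101)
# -> tensor([[0, 0, 0, 1, 1]])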
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
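# Illustrative usage (the checkpoint name and returned keys are assumptions, not
# taken from this module): the processor tokenizes text and preprocesses images
# in a single call.
# from PIL import Image
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# list(inputs.keys())  # expected: ['input_ids', 'attention_mask', 'pixel_values']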
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX Inference session with the given provider (default: CPU)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
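# Illustrative usage (the repository id is a placeholder, not taken from this
# module): load an exported ONNX model and call it with numpy-convertible
# keyword inputs; outputs come back as a list of numpy arrays.
# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
# outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))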
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format:
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        """
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
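# Example invocation (paths are placeholders; the script filename is whatever
# this file is saved as locally):
# python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#     --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
#     --pytorch_dump_folder_path /path/to/output_dir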
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 661
| 1
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long string of 0's and 1's."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # The code length grows by one bit: prefix every key with "0".
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given string of 0's and 1's as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that the compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
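
# For orientation, a minimal usage sketch of the decompression script above.
# The file names are hypothetical; the matching compressor lives in a separate
# script of the same collection.
#
#     python decompress.py compressed.bin restored.bin
#
# Programmatic equivalent:
#
#     bits = remove_prefix(read_file_binary("compressed.bin"))
#     write_file_binary("restored.bin", decompress_data(bits))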
| 236
|
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk skips scripts and hidden/private dirs.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
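
# Run from a repository root, the script prints a nested Markdown index of all
# Python files. The output looks roughly like this (names hypothetical):
#
#     ## Sorts
#       * [Bubble Sort](sorts/bubble_sort.py)
#       * [Merge Sort](sorts/merge_sort.py)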
| 236
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name: str) -> EfficientNetConfig:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
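
# A typical invocation of the conversion script above (the script filename is
# hypothetical; the flags are the ones registered with the parser):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model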
| 17
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class a__ ( lowerCamelCase_ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
requires_backends(self , "decord" )
self.check_model_type(_UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ):
"""simple docstring"""
_lowercase : Optional[Any] = {}
if frame_sampling_rate is not None:
_lowercase : Optional[Any] = frame_sampling_rate
if num_frames is not None:
_lowercase : Optional[Any] = num_frames
_lowercase : str = {}
if top_k is not None:
_lowercase : List[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return super().__call__(_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=1 ):
"""simple docstring"""
if num_frames is None:
_lowercase : Union[str, Any] = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
_lowercase : Union[str, Any] = BytesIO(requests.get(_UpperCamelCase ).content )
_lowercase : Tuple = VideoReader(_UpperCamelCase )
videoreader.seek(0 )
_lowercase : Tuple = 0
_lowercase : Tuple = num_frames * frame_sampling_rate - 1
        _lowercase : Any = np.linspace(_UpperCamelCase , _UpperCamelCase , num=_UpperCamelCase , dtype=np.int64 )
_lowercase : Dict = videoreader.get_batch(_UpperCamelCase ).asnumpy()
_lowercase : str = list(_UpperCamelCase )
_lowercase : Any = self.image_processor(_UpperCamelCase , return_tensors=self.framework )
return model_inputs
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Optional[int] = self.model(**_UpperCamelCase )
return model_outputs
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowercase : Tuple = self.model.config.num_labels
if self.framework == "pt":
_lowercase : Union[str, Any] = model_outputs.logits.softmax(-1 )[0]
_lowercase , _lowercase : Any = probs.topk(_UpperCamelCase )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
_lowercase : Optional[int] = scores.tolist()
_lowercase : Tuple = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
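
# For context, a short usage sketch of this pipeline through the high-level
# `pipeline` factory. The checkpoint id matches the VideoMAE model used
# elsewhere in this collection; the video path is hypothetical.
#
#     from transformers import pipeline
#
#     video_classifier = pipeline(
#         "video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics"
#     )
#     # Returns a list of {"score": float, "label": str} dicts, highest score first.
#     predictions = video_classifier("path/to/video.mp4", top_k=3)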
| 245
| 0
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_lowerCamelCase = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Dict ="maskformer"
__A : List[Any] ={"hidden_size": "mask_feature_size"}
__A : Optional[Any] =["resnet", "swin"]
__A : Any =["detr"]
def __init__( self ,_snake_case = 2_56 ,_snake_case = 2_56 ,_snake_case = 0.1 ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = 0.02 ,_snake_case = 1.0 ,_snake_case = 1.0 ,_snake_case = 1.0 ,_snake_case = 20.0 ,_snake_case = None ,**_snake_case ,):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase_ : Tuple = SwinConfig(
image_size=3_84 ,in_channels=3 ,patch_size=4 ,embed_dim=1_28 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=["stage1", "stage2", "stage3", "stage4"] ,)
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = backbone_config.pop("model_type" )
UpperCAmelCase_ : str = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Optional[int] = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase_ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase_ : Any = (
decoder_config.pop("model_type" ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {",".join(self.decoders_supported )}''' )
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING[decoder_type]
UpperCAmelCase_ : Optional[int] = config_class.from_dict(_snake_case )
UpperCAmelCase_ : List[str] = backbone_config
UpperCAmelCase_ : Tuple = decoder_config
# main feature dimension for the model
UpperCAmelCase_ : int = fpn_feature_size
UpperCAmelCase_ : str = mask_feature_size
# initializer
UpperCAmelCase_ : Optional[Any] = init_std
UpperCAmelCase_ : List[Any] = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase_ : Any = cross_entropy_weight
UpperCAmelCase_ : str = dice_weight
UpperCAmelCase_ : Optional[Any] = mask_weight
UpperCAmelCase_ : int = use_auxiliary_loss
UpperCAmelCase_ : List[Any] = no_object_weight
UpperCAmelCase_ : Optional[Any] = output_auxiliary_logits
UpperCAmelCase_ : int = self.decoder_config.encoder_attention_heads
UpperCAmelCase_ : Tuple = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCamelCase__ ( cls ,_snake_case ,_snake_case ,**_snake_case ):
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : Optional[Any] = self.decoder_config.to_dict()
UpperCAmelCase_ : Union[str, Any] = self.__class__.model_type
return output
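
# A brief construction sketch using the classmethod defined above. The public
# names below follow the transformers API this configuration mirrors (assumed
# to be MaskFormerConfig, SwinConfig and DetrConfig):
#
#     from transformers import DetrConfig, MaskFormerConfig, SwinConfig
#
#     # Pair a Swin backbone with a DETR decoder, mirroring the defaults above.
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )
#     assert config.backbone_config.model_type == "swin"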
| 709
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[Any] =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = 8 ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = do_rescale
UpperCAmelCase_ : int = rescale_factor
UpperCAmelCase_ : Optional[Any] = do_pad
UpperCAmelCase_ : List[str] = pad_size
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ):
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = get_image_size(_snake_case )
UpperCAmelCase_ : List[Any] = (old_height // size + 1) * size - old_height
UpperCAmelCase_ : Any = (old_width // size + 1) * size - old_width
return pad(_snake_case ,((0, pad_height), (0, pad_width)) ,mode="symmetric" ,data_format=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Optional[int] = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ : Tuple = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ : Optional[int] = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Tuple = [to_numpy_array(_snake_case ) for image in images]
if do_rescale:
UpperCAmelCase_ : Any = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_pad:
UpperCAmelCase_ : Dict = [self.pad(_snake_case ,size=_snake_case ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
UpperCAmelCase_ : Dict = {"pixel_values": images}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
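
# To make the pad_size arithmetic in the pad method concrete, a small
# standalone check (shapes chosen arbitrarily):
#
#     old_height, old_width, size = 250, 301, 8
#     pad_height = (old_height // size + 1) * size - old_height  # 6, so 250 -> 256
#     pad_width = (old_width // size + 1) * size - old_width     # 3, so 301 -> 304
#     assert (old_height + pad_height) % size == 0
#     assert (old_width + pad_width) % size == 0
#
# Note that a dimension already divisible by size still gains one full extra
# block of padding under this formula.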
| 323
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenates consecutive examples until max_tokens would be exceeded."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
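
# A representative invocation of the packing script (dataset paths are
# hypothetical; the script name follows its CLI entry point):
#
#     python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#         --data_dir ./cnn_dm --save_path ./cnn_dm_packed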
| 625
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _UpperCAmelCase ( A ):
'''simple docstring'''
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class snake_case_ :
'''simple docstring'''
def __UpperCAmelCase ( self, A_, A_ ) -> Dict:
pass
def __UpperCAmelCase ( self ) -> List[str]:
pass
def __UpperCAmelCase ( self ) -> str:
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> List[str]:
UpperCAmelCase__ =VisionTextDualEncoderConfig.from_vision_text_configs(A_, A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_vision_text_model(A_, A_ )
UpperCAmelCase__ ={"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> Any:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_vision_text_model(A_, A_ )
UpperCAmelCase__ ={"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
UpperCAmelCase__ =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained(A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
UpperCAmelCase__ =after_output[0]
UpperCAmelCase__ =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_, 1E-3 )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_vision_text_model(A_, A_ )
UpperCAmelCase__ ={"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
UpperCAmelCase__ =model(
input_ids=A_, pixel_values=A_, attention_mask=A_, output_attentions=A_ )
UpperCAmelCase__ =output.vision_model_output.attentions
self.assertEqual(len(A_ ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ =to_atuple(vision_model.config.image_size )
UpperCAmelCase__ =to_atuple(vision_model.config.patch_size )
UpperCAmelCase__ =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase__ =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase__ =output.text_model_output.attentions
self.assertEqual(len(A_ ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Union[str, Any]:
pt_model.to(A_ )
pt_model.eval()
# prepare inputs
UpperCAmelCase__ =inputs_dict
UpperCAmelCase__ ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase__ =pt_model(**A_ ).to_tuple()
UpperCAmelCase__ =fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ), len(A_ ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(A_, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained(A_, from_pt=A_ )
UpperCAmelCase__ =fx_model_loaded(**A_ ).to_tuple()
self.assertEqual(len(A_ ), len(A_ ), "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(A_, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A_ )
UpperCAmelCase__ =VisionTextDualEncoderModel.from_pretrained(A_, from_flax=A_ )
pt_model_loaded.to(A_ )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase__ =pt_model_loaded(**A_ ).to_tuple()
self.assertEqual(len(A_ ), len(A_ ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(A_, pt_output_loaded.numpy(), 4E-2 )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Tuple:
UpperCAmelCase__ =VisionTextDualEncoderConfig.from_vision_text_configs(A_, A_ )
UpperCAmelCase__ =VisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), A_ )
UpperCAmelCase__ =fx_state
self.check_pt_flax_equivalence(A_, A_, A_ )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Dict:
UpperCAmelCase__ =VisionTextDualEncoderConfig.from_vision_text_configs(A_, A_ )
UpperCAmelCase__ =VisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =load_flax_weights_in_pytorch_model(A_, fx_model.params )
self.check_pt_flax_equivalence(A_, A_, A_ )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A_ )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A_ )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_save_load(**A_ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A_ )
@is_pt_flax_cross_test
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =self.prepare_config_and_inputs()
UpperCAmelCase__ =config_inputs_dict.pop("vision_config" )
UpperCAmelCase__ =config_inputs_dict.pop("text_config" )
UpperCAmelCase__ =config_inputs_dict
self.check_equivalence_pt_to_flax(A_, A_, A_ )
self.check_equivalence_flax_to_pt(A_, A_, A_ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_pretrained_model_and_inputs()
UpperCAmelCase__ =model_a(**A_ )
UpperCAmelCase__ =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained(A_ )
UpperCAmelCase__ =model_a(**A_ )
UpperCAmelCase__ =after_outputs[0]
UpperCAmelCase__ =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_, 1E-5 )
@require_flax
class snake_case_ ( a, unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=A_, text_from_pt=A_, )
UpperCAmelCase__ =13
UpperCAmelCase__ =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
UpperCAmelCase__ =random_attention_mask([batch_size, 4] )
UpperCAmelCase__ ={"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __UpperCAmelCase ( self, A_, A_ ) -> Optional[int]:
UpperCAmelCase__ =FlaxViTModel(A_ )
UpperCAmelCase__ =FlaxBertModel(A_ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =FlaxViTModelTester(self )
UpperCAmelCase__ =FlaxBertModelTester(self )
UpperCAmelCase__ =vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ =bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ =vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class snake_case_ ( a, unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=A_, text_from_pt=A_, )
UpperCAmelCase__ =13
UpperCAmelCase__ =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
UpperCAmelCase__ =random_attention_mask([batch_size, 4] )
UpperCAmelCase__ ={"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __UpperCAmelCase ( self, A_, A_ ) -> Tuple:
UpperCAmelCase__ =FlaxCLIPVisionModel(A_ )
UpperCAmelCase__ =FlaxBertModel(A_ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ =FlaxCLIPVisionModelTester(self )
UpperCAmelCase__ =FlaxBertModelTester(self )
UpperCAmelCase__ =clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ =bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ =vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0 )
UpperCAmelCase__ =VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
UpperCAmelCase__ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ =processor(
text=["una foto di un gatto", "una foto di un cane"], images=A_, padding=A_, return_tensors="np" )
UpperCAmelCase__ =model(**A_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
UpperCAmelCase__ =np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image, A_, atol=1E-3 ) )
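
# A compact sketch of the dual-encoder factory the tests above exercise,
# using the same tiny test checkpoints the tests themselves load:
#
#     model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
#         "hf-internal-testing/tiny-random-vit",
#         "hf-internal-testing/tiny-bert",
#         vision_from_pt=True,
#         text_from_pt=True,
#     )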
| 625
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
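
# A quick sanity sketch for the helper above (the timestep count is arbitrary):
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert float(betas.min()) > 0 and float(betas.max()) <= 0.999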
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
a = [e.name for e in KarrasDiffusionSchedulers]
a = 2
@register_to_config
def __init__( self , a__ = 1000 , a__ = 0.0_0085 , a__ = 0.012 , a__ = "linear" , a__ = None , a__ = "epsilon" , a__ = False , a__ = False , a__ = 1.0 , a__ = "linspace" , a__ = 0 , ):
        if trained_betas is not None:
            A_ : List[str] = torch.tensor(_lowerCAmelCase , dtype=torch.float32 )
        elif beta_schedule == "linear":
            A_ : Tuple = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            A_ : Optional[int] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.float32 ) ** 2
            )
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A_ : Any = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
A_ : Optional[int] = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="""exp""" )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
A_ : Any = 1.0 - self.betas
A_ : str = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A_ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self , a__ , a__=None ):
if schedule_timesteps is None:
A_ : Optional[int] = self.timesteps
A_ : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A_ : int = 1 if len(_lowerCAmelCase ) > 1 else 0
else:
A_ : List[Any] = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
A_ : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self ):
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self , a__ , a__ , ):
A_ : int = self.index_for_timestep(_lowerCAmelCase )
A_ : Tuple = self.sigmas[step_index]
A_ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self , a__ , a__ = None , a__ = None , ):
A_ : int = num_inference_steps
A_ : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A_ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A_ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ : int = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A_ : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ : int = (np.arange(_lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(_lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.""" )
A_ : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A_ : str = np.log(_lowerCAmelCase )
A_ : Tuple = np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase ) ) , _lowerCAmelCase )
if self.config.use_karras_sigmas:
A_ : int = self._convert_to_karras(in_sigmas=_lowerCAmelCase , num_inference_steps=self.num_inference_steps )
A_ : List[Any] = np.array([self._sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) for sigma in sigmas] )
        A_ : str = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
A_ : Union[str, Any] = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase )
A_ : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A_ : Any = torch.from_numpy(_lowerCAmelCase )
A_ : Any = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCAmelCase ).startswith("""mps""" ):
# mps does not support float64
            A_ : List[str] = timesteps.to(_lowerCAmelCase , dtype=torch.float32 )
else:
A_ : List[Any] = timesteps.to(device=_lowerCAmelCase )
# empty dt and derivative
A_ : Optional[int] = None
A_ : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A_ : List[str] = defaultdict(_lowerCAmelCase )
def _lowerCamelCase ( self , a__ , a__ ):
A_ : Dict = np.log(_lowerCAmelCase )
# get distribution
A_ : Optional[int] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A_ : int = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A_ : List[str] = low_idx + 1
A_ : Any = log_sigmas[low_idx]
A_ : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
A_ : int = (low - log_sigma) / (low - high)
A_ : int = np.clip(_lowerCAmelCase , 0 , 1 )
# transform interpolation to time range
A_ : Dict = (1 - w) * low_idx + w * high_idx
A_ : Union[str, Any] = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self , a__ , a__ ):
A_ : str = in_sigmas[-1].item()
A_ : str = in_sigmas[0].item()
A_ : Tuple = 7.0 # 7.0 is the value used in the paper
A_ : Union[str, Any] = np.linspace(0 , 1 , _lowerCAmelCase )
A_ : Optional[Any] = sigma_min ** (1 / rho)
A_ : Optional[Any] = sigma_max ** (1 / rho)
A_ : Dict = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self ):
return self.dt is None
def _lowerCamelCase ( self , a__ , a__ , a__ , a__ = True , ):
A_ : Dict = self.index_for_timestep(_lowerCAmelCase )
# advance index counter by 1
A_ : str = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A_ : int = self.sigmas[step_index]
A_ : List[str] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A_ : Tuple = self.sigmas[step_index - 1]
A_ : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A_ : Optional[Any] = 0
A_ : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A_ : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
A_ : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A_ : int = sigma_hat if self.state_in_first_order else sigma_next
A_ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A_ : str = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
A_ : Optional[Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A_ : Any = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A_ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
A_ : List[Any] = derivative
A_ : int = dt
A_ : Optional[int] = sample
else:
# 2. 2nd order / Heun's method
A_ : Optional[Any] = (sample - pred_original_sample) / sigma_next
A_ : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A_ : Optional[Any] = self.dt
A_ : str = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A_ : str = None
A_ : Tuple = None
A_ : Optional[int] = None
A_ : Dict = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def _lowerCamelCase ( self , a__ , a__ , a__ , ):
A_ : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ):
# mps does not support float64
            A_ : Dict = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            A_ : Tuple = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
A_ : Dict = self.timesteps.to(original_samples.device )
A_ : str = timesteps.to(original_samples.device )
A_ : Optional[int] = [self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase ) for t in timesteps]
A_ : Any = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A_ : Dict = sigma.unsqueeze(-1 )
A_ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
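
# To make the Karras interpolation in _convert_to_karras concrete, a small
# standalone check that mirrors the rho = 7.0 formula above (endpoint values
# arbitrary):
#
#     import numpy as np
#
#     sigma_min, sigma_max, rho = 0.1, 10.0, 7.0
#     ramp = np.linspace(0, 1, 5)
#     min_inv_rho = sigma_min ** (1 / rho)
#     max_inv_rho = sigma_max ** (1 / rho)
#     sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
#     # The schedule runs monotonically from sigma_max down to sigma_min.
#     assert np.isclose(sigmas[0], sigma_max) and np.isclose(sigmas[-1], sigma_min)
#     assert np.all(np.diff(sigmas) < 0)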
| 709
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCAmelCase :
def __init__( self , a__ , a__=13 , a__=10 , a__=3 , a__=2 , a__=2 , a__=2 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.02 , a__=0.9 , a__=None , ):
A_ : Tuple = parent
A_ : Union[str, Any] = batch_size
A_ : str = image_size
A_ : Union[str, Any] = num_channels
A_ : List[str] = patch_size
A_ : Optional[Any] = tubelet_size
A_ : List[Any] = num_frames
A_ : str = is_training
A_ : List[Any] = use_labels
A_ : List[str] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : str = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Dict = hidden_act
A_ : int = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = type_sequence_label_size
A_ : int = initializer_range
A_ : Dict = mask_ratio
A_ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
A_ : int = (image_size // patch_size) ** 2
A_ : Dict = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
A_ : Dict = int(mask_ratio * self.seq_length )
def _lowerCamelCase ( self ):
A_ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A_ : Optional[int] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
A_ : Dict = VideoMAEModel(config=a__ )
model.to(a__ )
model.eval()
A_ : Dict = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
A_ : Optional[Any] = VideoMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A_ : List[Any] = torch.ones((self.num_masks,) )
A_ : Dict = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
A_ : int = mask.expand(self.batch_size , -1 ).bool()
A_ : List[Any] = model(a__ , a__ )
# model only returns predictions for masked patches
A_ : Union[str, Any] = mask.sum().item()
A_ : Tuple = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowerCamelCase ( self ):
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Optional[Any] = config_and_inputs
A_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
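
# With the tester defaults above (image_size=10, patch_size=2, tubelet_size=2,
# num_frames=2, mask_ratio=0.9), the sequence arithmetic works out as follows;
# this is a standalone recomputation, not part of the test file:
#
#     image_size, patch_size, tubelet_size, num_frames, mask_ratio = 10, 2, 2, 2, 0.9
#     num_patches_per_frame = (image_size // patch_size) ** 2            # 25
#     seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 25
#     num_masks = int(mask_ratio * seq_length)                           # 22
#     decoder_num_labels = 3 * tubelet_size * patch_size**2              # 24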
@require_torch
class _UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
a = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def _lowerCamelCase ( self ):
A_ : int = VideoMAEModelTester(self )
A_ : Any = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _lowerCamelCase ( self , a__ , a__ , a__=False ):
A_ : Optional[Any] = copy.deepcopy(a__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A_ : List[Any] = torch.ones((self.model_tester.num_masks,) )
A_ : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
A_ : Union[str, Any] = mask.expand(self.model_tester.batch_size , -1 ).bool()
A_ : int = bool_masked_pos.to(a__ )
if return_labels:
if model_class in [
*get_values(a__ ),
]:
A_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
return inputs_dict
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def _lowerCamelCase ( self ):
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(a__ )
A_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def _lowerCamelCase ( self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _lowerCamelCase ( self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
@slow
def _lowerCamelCase ( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
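# A minimal sketch (not part of the test suite above) of building a *random* boolean mask for
# VideoMAEForPreTraining, rather than the fixed mask loaded from the hub in the test; the 0.9
# mask ratio and the sequence length below are illustrative assumptions, not values from the tests.
def make_random_bool_masked_pos(seq_length=1568, mask_ratio=0.9):
    num_masks = int(mask_ratio * seq_length)
    mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
    # shuffle so the masked patch positions are random; shape (1, seq_length)
    return mask[torch.randperm(seq_length)].unsqueeze(0).bool()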
| 481
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
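# Hedged usage note (standard `accelerate` workflow, not part of this script): after running
# `accelerate config` once, the example can be launched with, e.g. (filename assumed),
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16
#
# With batch_size=16 and 4 accumulation steps, the effective batch size is 64 per process,
# since optimizer.step() only runs every 4 iterations inside accelerator.accumulate(model).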
| 374
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 374
| 1
|
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
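# Quick self-check; the expected string follows directly from the f-string above.
assert multiplication_table(3, 2) == "3 * 1 = 3\n3 * 2 = 6"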
| 445
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 445
| 1
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
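# A vectorized alternative sketch: for uint8 images, NumPy broadcasting computes the same
# negative in one step, avoiding the Python-level double loop above.
def convert_to_negative_fast(img):
    return 255 - img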
| 33
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # swap so that array[index1] <= array[index2] when ascending (direction == 1),
    # and array[index1] >= array[index2] when descending (direction == 0)
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    # recursively merge a bitonic sequence into a monotone one
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    # sort array[low : low + length] in place; length must be a power of two
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
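# Note: bitonic sort only handles inputs whose length is a power of two; other lengths are
# not covered by the halving recursion above. A quick self-check on a 4-element list
# (runs at import time; a sketch, not part of the original script):
_demo = [12, 42, -21, 1]
bitonic_sort(_demo, 0, len(_demo), 1)
assert _demo == [-21, 1, 12, 42]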
| 529
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join([str(value) for value in self.rows[0]]) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
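# A short usage sketch for the Matrix class above (values chosen for illustration):
m = Matrix([[1, 2], [3, 4]])
assert m.determinant() == -2
assert (m + m).rows == [[2, 4], [6, 8]]
assert (m**2).rows == [[7, 10], [15, 22]]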
| 277
| 0
|
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # bubble the largest remaining element to the right
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # bubble the smallest remaining element to the left
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f'{cocktail_shaker_sort(unsorted) = }')
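# Cocktail shaker sort is bidirectional bubble sort: each outer pass moves the largest
# remaining element right and the smallest left, so already-sorted inputs exit after one
# pass (O(n) best case, O(n^2) worst case). A quick self-check:
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]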
| 176
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
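# Hedged usage note (the argument names come from this script's PretokenizationArguments;
# the repo ids below are placeholders, not real datasets):
#
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name <raw-dataset-repo> \
#       --tokenized_data_repo <tokenized-dataset-repo>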
| 176
| 1
|
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
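# Quick check of the collinearity helper: (4, 2, 6) lies on the line through (0, 0, 0)
# and (2, 1, 3), so the cross product of AB and AC is the zero vector.
assert are_collinear((0, 0, 0), (2, 1, 3), (4, 2, 6))
assert not are_collinear((0, 0, 0), (2, 1, 3), (4, 2, 7))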
| 210
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"  # elided in the source dump; kept as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"  # elided in the source dump; kept as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 210
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''openai/imagegpt-small''': '''''',
    '''openai/imagegpt-medium''': '''''',
    '''openai/imagegpt-large''': '''''',
}


class ImageGPTConfig(PretrainedConfig):
    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
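# A minimal usage sketch for the config above (the small sizes are my own illustration, not
# ImageGPT defaults); attribute_map lets the generic `hidden_size` name resolve to `n_embd`.
config = ImageGPTConfig(n_embd=256, n_layer=4, n_head=4)
assert config.hidden_size == 256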
| 72
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
    from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }
            ),
            codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''],
            reference_urls=[
                '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
                '''https://en.wikipedia.org/wiki/METEOR''',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download('''wordnet''')
        if NLTK_VERSION >= version.Version('''3.6.5'''):
            nltk.download('''punkt''')
        if NLTK_VERSION >= version.Version('''3.6.6'''):
            nltk.download('''omw-1.4''')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('''3.6.5'''):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 72
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
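# Hedged sketch (not part of the test above): per the inline comment, the hard-coded ids
# correspond to "J'aime le camembert !"; with the matching sentencepiece tokenizer they
# could be rebuilt as follows (checkpoint availability is an assumption).
def rebuild_input_ids():
    from transformers import CamembertTokenizer

    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    return tokenizer("J'aime le camembert !", return_tensors="tf").input_ids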
| 385
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 385
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowerCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowerCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = OpenLlamaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __snake_case ( self : List[str] ):
self.config_tester.run_common_tests()
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*a__ )
def __snake_case ( self : Dict ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = input_dict['''input_ids''']
UpperCAmelCase = input_ids.ne(1 ).to(a__ )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase = OpenLlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __snake_case ( self : Any ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = '''single_label_classification'''
UpperCAmelCase = input_dict['''input_ids''']
UpperCAmelCase = input_ids.ne(1 ).to(a__ )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase = OpenLlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __snake_case ( self : Tuple ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        # multi-label targets are one float per label, hence the cast to torch.float
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def __snake_case ( self : Optional[int] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def __snake_case (self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
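        # A minimal sketch of the rope_scaling payload exercised above (assuming the
        # config accepts the same dict this test builds); both strategies share the
        # {'type': ..., 'factor': ...} shape:
        #   config.rope_scaling = {'type': 'linear', 'factor': 10.0}   # rescales positions immediately
        #   config.rope_scaling = {'type': 'dynamic', 'factor': 10.0}  # only kicks in past the original max length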
| 51
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51
| 1
|
from __future__ import annotations
def lowerCamelCase__ (matrix):
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
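# A hedged usage sketch (the grid is an illustrative input, not part of the original
# module): each cell holds a step cost and only right/down moves are allowed, so the
# function returns the cheapest top-left -> bottom-right path cost (computed in place).
# >>> lowerCamelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# 7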
| 713
|
import cmath
import math
def lowerCamelCase__ (voltage, current, voltage_angle, current_angle):
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex phasor) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
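# A hedged usage sketch (illustrative values): with both phase angles at 0 degrees the
# two phasors are purely real, so the apparent power is just the product of magnitudes.
# >>> lowerCamelCase__(100, 5, 0, 0)
# (500+0j)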
| 381
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Any ) -> Tuple:
"""simple docstring"""
        model_id = 'google/ncsnpp-church-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 43
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
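# Note: stacked @patch decorators are applied bottom-up, so the innermost patch
# (builtins.open) is injected as the first mock argument and socket.socket as the second.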
def __UpperCamelCase (file: Mock, sock: Mock) -> None:
    '''simple docstring'''
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f_readline = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f_readline)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 600
| 0
|
from __future__ import annotations
def prime_sieve(limit) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling = 1_0_0_0_0_0_0) -> int:
    '''simple docstring'''
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
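# A hedged usage sketch (small ceiling for illustration): below 100 the longest run of
# consecutive primes that sums to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so
# >>> solution(100)
# 41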
| 719
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector) -> float:
    '''simple docstring'''
    return np.dot(vector, vector)
class UpperCamelCase :
    '''simple docstring'''
    def __init__(self, *,
        regularization = np.inf, kernel = "linear", gamma = 0.0, ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vectora, vectorb):
        return np.dot(vectora, vectorb)
    def __rbf(self, vectora, vectorb):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit(self, observations, classes):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
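# A hedged usage sketch (toy 1-D data for illustration only; labels must be +/-1 and
# each observation a numpy vector). `fit` solves the Wolfe dual above with scipy and
# `predict` returns the sign of the decision function:
# >>> xs = [np.asarray([0.0]), np.asarray([1.0]), np.asarray([3.0]), np.asarray([4.0])]
# >>> svc = UpperCamelCase(kernel="linear")
# >>> svc.fit(xs, [-1, -1, 1, 1])
# >>> svc.predict(np.asarray([5.0]))
# 1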
| 441
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase_ (PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs, ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self) -> int:
        return self.d_model
    @property
    def num_attention_heads(self) -> int:
        return self.num_heads
    @property
    def num_hidden_layers(self) -> int:
        return self.num_layers
class lowerCAmelCase_ (OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13
    @property
    def atol_for_validation(self) -> float:
        return 5E-4
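# A hedged illustration of the `feed_forward_proj` parsing above (hypothetical calls to
# the UMT5 config class): "gated-silu" yields dense_act_fn == "silu" with is_gated_act
# True, plain "relu" yields dense_act_fn == "relu" with is_gated_act False, and
# "gated-gelu" is special-cased to the "gelu_new" activation.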
| 18
|
'''simple docstring'''
def is_arithmetic_series(series: list):
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list):
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
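# A hedged usage sketch for the two helpers above (illustrative inputs):
# >>> is_arithmetic_series([2, 4, 6, 8])   # constant difference of 2
# True
# >>> is_arithmetic_series([2, 4, 7])      # difference changes from 2 to 3
# False
# >>> arithmetic_mean([2, 4, 6])
# 4.0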
| 267
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class lowercase_ (PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(self, vocab_size=3_0_5_2_2, max_position_embeddings=5_1_2, sinusoidal_pos_embds=False, n_layers=6, n_heads=1_2, dim=7_6_8, hidden_dim=4 * 7_6_8, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs, ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class lowercase_ (OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 612
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowercase_ (YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 612
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester (ConfigTester):
    """simple docstring"""
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
_a : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a ,'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_a ,'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_a ,'num_attention_heads' ) )
class MobileViTModelTester :
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        '''simple docstring'''
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def __lowercase ( self : List[str] ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 229
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
    '''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
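        # Worked example of the padding arithmetic above (illustrative numbers): with
        # seq_length=7 and attention_window=4, 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8, i.e. the
        # sequence is padded up to the next multiple of the attention window.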
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __lowercase (TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
def A_ ( self : int ):
self.config_tester.run_common_tests()
def A_ ( self : Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def A_ (self: List[Any]):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['global_attention_mask'] = tf.zeros_like(inputs_dict['attention_mask'])
        num_global_attn_indices = 2
        inputs_dict['global_attention_mask'] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict['global_attention_mask'], )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def A_ ( self : List[str] ):
pass
def A_ ( self : Dict ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)
lowercase = 1E-4
@slow
@require_tf
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Any ):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-3)
def A_ ( self : Optional[Any] ):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-3, rtol=1E-3)
| 240
| 0
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def __lowerCamelCase ( self ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_head_masking = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
    def __lowerCamelCase (self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)
    def __lowerCamelCase (self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)
    def __lowerCamelCase (self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)
    def __lowerCamelCase (self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)
    def __lowerCamelCase (self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class _lowerCamelCase (unittest.TestCase ):
@slow
    def __lowerCamelCase (self):
        model = MPNetModel.from_pretrained('microsoft/mpnet-base')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 700
|
from ..utils import DummyObject, requires_backends
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class _lowerCamelCase (metaclass=DummyObject):
    _backends = ["""flax"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
| 345
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
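    # Background note (not in the original test): the expected slice comes from running
    # the reference TF checkpoint once; np.allclose with atol=1e-4 leaves headroom for
    # small numerical drift between TF versions and hardware.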
| 381
|
from manim import *
class __a ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : Tuple)-> Dict:
__lowerCAmelCase =Rectangle(height=0.5 , width=0.5)
__lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
__lowerCAmelCase =[mem.copy() for i in range(6)]
__lowerCAmelCase =[mem.copy() for i in range(6)]
__lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
__lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
__lowerCAmelCase =VGroup(snake_case_ , snake_case_).arrange(snake_case_ , buff=0)
__lowerCAmelCase =Text("""CPU""" , font_size=24)
__lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
cpu.move_to([-2.5, -0.5, 0])
self.add(snake_case_)
__lowerCAmelCase =[mem.copy() for i in range(1)]
__lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
__lowerCAmelCase =Text("""GPU""" , font_size=24)
__lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
gpu.align_to(snake_case_ , snake_case_)
gpu.set_x(gpu.get_x() - 1)
self.add(snake_case_)
__lowerCAmelCase =[mem.copy() for i in range(6)]
__lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
__lowerCAmelCase =Text("""Model""" , font_size=24)
__lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
model.move_to([3, -1.0, 0])
self.play(
Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , )
__lowerCAmelCase =MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
__lowerCAmelCase =Square(side_length=2.2)
key.move_to([-5, 2, 0])
__lowerCAmelCase =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(snake_case_ , run_time=2.5) , Write(snake_case_) , Write(snake_case_))
self.add(snake_case_)
__lowerCAmelCase =[]
__lowerCAmelCase =[]
__lowerCAmelCase =[]
for i, rect in enumerate(snake_case_):
__lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(snake_case_ , opacity=0.7)
cpu_target.move_to(snake_case_)
cpu_target.generate_target()
__lowerCAmelCase =0.4_6 / 4
__lowerCAmelCase =0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=snake_case_)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0)
cpu_targs.append(snake_case_)
first_animations.append(rect.animate(run_time=0.5).set_stroke(snake_case_))
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5))
self.play(*snake_case_)
self.play(*snake_case_)
self.wait()
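# Background note (not in the original scene): this Manim animation illustrates the
# low-RAM loading story -- the "Model" blocks are created as an empty skeleton first
# (the MarkupText above says as much) and only filled with real weights later.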
| 354
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : int = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : int = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class ConditionalDetrConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model

    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-5

    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
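# Usage sketch (mine; `cfg` is a hypothetical variable). `attribute_map` lets
# BERT-style attribute names resolve to the DETR-style ones, so reading
# `hidden_size` returns `d_model`:
#
#   cfg = ConditionalDetrConfig(d_model=256, encoder_layers=6)
#   assert cfg.hidden_size == 256
#   assert cfg.num_attention_heads == cfg.encoder_attention_heads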
| 643
| 0
|
'''simple docstring'''
from math import ceil
def solution( n : int = 10_01 ):
    '''simple docstring'''
    total = 1
    for i in range(1, int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
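# Worked check (mine, not part of the original solution): the ring at distance i from
# the centre has corner values (2i+1)**2, (2i+1)**2 - 2i, (2i+1)**2 - 4i and
# (2i+1)**2 - 6i, which sum to 4*odd**2 - 6*even as in the loop above. For a 5x5
# spiral the diagonals 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 give solution(5) == 101.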
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 119
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''gpt-neox-20b''': 2048,
}


class lowerCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation : "Conversation" ):
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
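# Usage sketch (mine; the model id is real, but nothing below runs at import time):
#
#   tok = lowerCAmelCase.from_pretrained("EleutherAI/gpt-neox-20b")
#   tok("Hello world").input_ids   # BPE ids; add_prefix_space defaults to False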
| 119
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    '''simple docstring'''
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url ).read() )['releases'].keys()
    return sorted(releases , key=lambda release : version.Version(release ) )
def init_hf_modules():
    '''simple docstring'''
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name ):
    '''simple docstring'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( module_file ):
    '''simple docstring'''
    with open(module_file , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
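# Illustration (mine): for a file containing `import .utils` and
# `from .pipeline import MyPipeline`, the two MULTILINE regexes above yield
# ['utils', 'pipeline'] (order not guaranteed, since duplicates are removed via a set).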
def get_relative_import_files( module_file ):
    '''simple docstring'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'''{f}.py''' for f in new_import_files]
        no_change = len(files_to_check ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( filename ):
    '''simple docstring'''
    with open(filename , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F'''{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`''' )
    return get_relative_imports(filename )
def get_class_in_module( class_name , module_path ):
    '''simple docstring'''
    module_path = module_path.replace(os.path.sep , '.' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class( loaded_module ):
    '''simple docstring'''
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('.' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''' )
            pipeline_class = cls
    return pipeline_class
def a ( snake_case__: str , snake_case__: str , snake_case__: Tuple = None , snake_case__: Optional[int] = False , snake_case__: Optional[int] = False , snake_case__: Any = None , snake_case__: Union[str, Any] = None , snake_case__: Tuple = None , snake_case__: List[str] = False , ):
'''simple docstring'''
lowercase_ = str(_lowerCamelCase )
lowercase_ = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
lowercase_ = module_file_or_url
lowercase_ = "local"
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase_ = get_diffusers_versions()
# cut ".dev0"
lowercase_ = "v" + ".".join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase_ = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowercase_ = F'''v{revision}'''
elif revision == "main":
lowercase_ = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowercase_ = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
lowercase_ = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
lowercase_ = "git"
lowercase_ = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowercase_ = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
lowercase_ = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowercase_ = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
lowercase_ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
lowercase_ = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
lowercase_ = F'''{module_needed}.py'''
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase_ = use_auth_token
elif use_auth_token is True:
lowercase_ = HfFolder.get_token()
else:
lowercase_ = None
lowercase_ = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase_ = submodule_path / commit_hash
lowercase_ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , F'''{module_needed}.py''' , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def a ( snake_case__: Tuple , snake_case__: Any , snake_case__: Optional[Any] = None , snake_case__: Union[str, Any] = None , snake_case__: str = False , snake_case__: Any = False , snake_case__: Tuple = None , snake_case__: int = None , snake_case__: List[Any] = None , snake_case__: List[str] = False , **snake_case__: Tuple , ):
'''simple docstring'''
lowercase_ = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace('''.py''' , '''''' ) )
| 717
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(__a )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ) -> Any:
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )

    def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ) -> Any:
        return ("This is a test", "This is a test")
def _lowercase ( self : str ) -> Optional[Any]:
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _lowercase ( self : Any ) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 1_1_0_3 )
def _lowercase ( self : Tuple ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def _lowercase ( self : Optional[Any] ) -> Tuple:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )
def _lowercase ( self : int ) -> List[str]:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
def _lowercase ( self : Tuple ) -> Union[str, Any]:
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
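    # Background note (not in the original test): Pegasus shifts every SentencePiece id
    # by offset == 103 to reserve the low ids for pad, eos and the mask/unk placeholder
    # tokens, which is exactly what the offset and unk assertions above exercise.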
@require_torch
def _lowercase ( self : int ) -> Tuple:
        src_texts = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def _lowercase ( self : int ) -> Tuple:
# fmt: off
lowercase_ = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(__a , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ) -> Optional[Any]:
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )

    def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ) -> Any:
        return ("This is a test", "This is a test")
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )
@require_torch
def _lowercase ( self : Optional[Any] ) -> int:
        src_texts = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
def _lowercase ( self : Optional[int] ) -> Any:
        raw_input_str = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            ids , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 409
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig (PretrainedConfig ):
    model_type = 'data2vec-text'

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig (OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
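# Illustration (mine): for the default task the `inputs` property returns
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})]),
# i.e. both the batch and the sequence length are exported as dynamic ONNX axes.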
| 235
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer (ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim : int = 768 , ):
        """simple docstring"""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )

    def to( self , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None , ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale( self , embeds ):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale( self , embeds ):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
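# Round-trip sketch (mine): `scale` standardises image embeddings with the learned
# mean/std and `unscale` inverts it, so the two compose to (almost) the identity:
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   x = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-5)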
| 235
| 1
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 711
|
from __future__ import annotations
def is_palindrome( n ):
    """simple docstring"""
    s = str(n )
    return s == s[::-1]


def solution( limit = 1000000 ):
    """simple docstring"""
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
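# Quick sanity check (mine): 585 is a palindrome in base 10 and in base 2
# (bin(585) == '0b1001001001'), so it is one of the numbers summed by solution().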
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 488
| 0
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowerCAmelCase ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""

    def __init__(self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError("""You have to specify at least images or text.""" )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding["""qformer_input_ids"""] = qformer_text_encoding.pop("""input_ids""" )
            encoding["""qformer_attention_mask"""] = qformer_text_encoding.pop("""attention_mask""" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained(self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , """qformer_tokenizer""" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="""qformer_tokenizer""" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
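# Usage sketch (mine; `processor` and `image` are hypothetical variables):
#
#   inputs = processor(images=image, text="What is shown?", return_tensors="pt")
#
# `inputs` then carries input_ids/attention_mask from the main tokenizer,
# qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer, and
# pixel_values from the image processor.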
| 414
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
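    # Invocation sketch (mine; the script filename is hypothetical):
    #
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation
    #
    # The conversion reuses every sub-module of the text-to-image pipeline and only
    # swaps the text-side conditioning for a CLIP image encoder plus feature extractor.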
| 414
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
    """simple docstring"""
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
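# Background note (not in the original helper): the decoder attention mask built above
# always unmasks the first position (the decoder start token) and masks pad tokens in
# the remaining positions, mirroring what the model sees during generation.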
@require_tf
class TFBlenderbotSmallModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotSmallForConditionalGeneration,
            """feature-extraction""": TFBlenderbotSmallModel,
            """summarization""": TFBlenderbotSmallForConditionalGeneration,
            """text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
            """translation""": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests ( unittest.TestCase ):
    """simple docstring"""
    src_text = [
        """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
        """ i'm going to throw up.\nand why is that?"""
    ]
    model_name = """facebook/blenderbot_small-90M"""

    @cached_property
    def tokenizer( self ):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )

    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def test_90_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 592
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( TestCasePlus ):
    """simple docstring"""
    @slow
    @require_torch
    def test_finetune_bert2bert( self ):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch

        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
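        # Background note (not in the original test): labels set to -100 are ignored by
        # PyTorch's cross-entropy loss, which is why pad token ids are replaced with
        # -100 in _map_to_encoder_decoder_inputs above.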
| 592
| 1
|
def _UpperCAmelCase ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    elif number < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return bin(number ).count("""1""" )
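# Quick sanity check (mine): 25 is 0b11001 and 37 is 0b100101, so
# _UpperCAmelCase(25) == 3 and _UpperCAmelCase(37) == 3.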
if __name__ == "__main__":
import doctest
doctest.testmod()
| 519
|
def _UpperCAmelCase ( files : list ):
    """simple docstring"""
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
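# Worked example (mine): for sizes [2, 3, 4] the greedy loop first merges 2+3 at
# cost 5, then 5+4 at cost 9, so _UpperCAmelCase([2, 3, 4]) == 14.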
if __name__ == "__main__":
import doctest
doctest.testmod()
| 519
| 1
|

import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Audio:
    """Audio feature: decodable {"bytes", "path"} structs."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-bytes, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
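
# Usage sketch (added, illustrative only): encoding accepts a path, raw bytes, or an
# array plus sampling rate, and always normalizes to the {"bytes", "path"} struct.
#
#   audio = Audio(sampling_rate=16_000)
#   audio.encode_example("speech.wav")                                          # {"bytes": None, "path": "speech.wav"}
#   audio.encode_example({"array": np.zeros(16_000), "sampling_rate": 16_000})  # {"bytes": b"RIFF...", "path": None}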

from collections import deque


def tarjan(g):
    """Return the strongly connected components of a directed graph ``g``
    (adjacency list), using Tarjan's algorithm."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
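    # Illustrative extra check (added, not part of the original file): a 3-cycle
    # collapses into a single strongly connected component.
    assert tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]]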
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _UpperCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
a__ =4_2
class _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , A = 3_2 , A = 6_4 , A = 2_0 , A = 7_6_8 , A=7_7 , A=4 , A = 0.0 , A = "silu" , A = None , A = None , A = "linear" , A = "prd" , A = None , A = None , A = None , ) -> List[str]:
super().__init__()
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : List[Any] = attention_head_dim
_UpperCAmelCase : Optional[int] = num_attention_heads * attention_head_dim
_UpperCAmelCase : List[Any] = additional_embeddings
_UpperCAmelCase : str = time_embed_dim or inner_dim
_UpperCAmelCase : Dict = embedding_proj_dim or embedding_dim
_UpperCAmelCase : Union[str, Any] = clip_embed_dim or embedding_dim
_UpperCAmelCase : str = Timesteps(A , A , 0 )
_UpperCAmelCase : List[str] = TimestepEmbedding(A , A , out_dim=A , act_fn=A )
_UpperCAmelCase : Dict = nn.Linear(A , A )
if embedding_proj_norm_type is None:
_UpperCAmelCase : List[str] = None
elif embedding_proj_norm_type == "layer":
_UpperCAmelCase : Any = nn.LayerNorm(A )
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
_UpperCAmelCase : Any = nn.Linear(A , A )
if encoder_hid_proj_type is None:
_UpperCAmelCase : Optional[int] = None
elif encoder_hid_proj_type == "linear":
_UpperCAmelCase : Optional[int] = nn.Linear(A , A )
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
_UpperCAmelCase : int = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , A ) )
if added_emb_type == "prd":
_UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.zeros(1 , 1 , A ) )
elif added_emb_type is None:
_UpperCAmelCase : str = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
_UpperCAmelCase : Optional[Any] = nn.ModuleList(
[
BasicTransformerBlock(
A , A , A , dropout=A , activation_fn='''gelu''' , attention_bias=A , )
for d in range(A )
] )
if norm_in_type == "layer":
_UpperCAmelCase : int = nn.LayerNorm(A )
elif norm_in_type is None:
_UpperCAmelCase : List[Any] = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' )
_UpperCAmelCase : Tuple = nn.LayerNorm(A )
_UpperCAmelCase : Optional[int] = nn.Linear(A , A )
_UpperCAmelCase : Tuple = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_UpperCAmelCase : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , A , persistent=A )
_UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , A ) )
_UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.zeros(1 , A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : int = {}
def fn_recursive_add_processors(A , A , A ):
if hasattr(A , '''set_processor''' ):
_UpperCAmelCase : Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def __lowerCAmelCase ( self , A ) -> Any:
_UpperCAmelCase : str = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(A )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(A , A , A ):
if hasattr(A , '''set_processor''' ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def __lowerCAmelCase ( self ) -> Dict:
self.set_attn_processor(AttnProcessor() )
def __lowerCAmelCase ( self , A , A , A , A = None , A = None , A = True , ) -> str:
_UpperCAmelCase : Tuple = hidden_states.shape[0]
_UpperCAmelCase : Dict = timestep
if not torch.is_tensor(A ):
_UpperCAmelCase : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
_UpperCAmelCase : Any = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase : Union[str, Any] = timesteps * torch.ones(A , dtype=timesteps.dtype , device=timesteps.device )
_UpperCAmelCase : Union[str, Any] = self.time_proj(A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_UpperCAmelCase : Optional[int] = timesteps_projected.to(dtype=self.dtype )
_UpperCAmelCase : int = self.time_embedding(A )
if self.embedding_proj_norm is not None:
_UpperCAmelCase : Dict = self.embedding_proj_norm(A )
_UpperCAmelCase : Union[str, Any] = self.embedding_proj(A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_UpperCAmelCase : Optional[int] = self.encoder_hidden_states_proj(A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
_UpperCAmelCase : Optional[int] = self.proj_in(A )
_UpperCAmelCase : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Dict = 0
if encoder_hidden_states is not None:
additional_embeds.append(A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_UpperCAmelCase : Union[str, Any] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_UpperCAmelCase : List[str] = hidden_states[:, None, :]
_UpperCAmelCase : List[str] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_UpperCAmelCase : str = self.prd_embedding.to(hidden_states.dtype ).expand(A , -1 , -1 )
additional_embeds.append(A )
_UpperCAmelCase : Union[str, Any] = torch.cat(
A , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_UpperCAmelCase : List[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_UpperCAmelCase : Optional[Any] = F.pad(
A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_UpperCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_UpperCAmelCase : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_UpperCAmelCase : Dict = F.pad(A , (0, self.additional_embeddings) , value=0.0 )
_UpperCAmelCase : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_UpperCAmelCase : List[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_UpperCAmelCase : Union[str, Any] = self.norm_in(A )
for block in self.transformer_blocks:
_UpperCAmelCase : Optional[int] = block(A , attention_mask=A )
_UpperCAmelCase : Any = self.norm_out(A )
if self.prd_embedding is not None:
_UpperCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_UpperCAmelCase : Optional[int] = hidden_states[:, additional_embeddings_len:]
_UpperCAmelCase : Optional[int] = self.proj_to_clip_embeddings(A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=A )
def __lowerCAmelCase ( self , A ) -> List[str]:
_UpperCAmelCase : Dict = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
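
# Shape sketch (added, illustrative, relies on the defaults above: inner_dim = 32 * 64 = 2048,
# embedding_dim = clip_embed_dim = 768): a forward pass maps a noisy image embedding plus
# CLIP text conditioning to a predicted image embedding of the same width.
#
#   model = PriorTransformer()
#   out = model(
#       hidden_states=torch.randn(2, 768),             # noisy image embedding
#       timestep=torch.tensor([10, 10]),
#       proj_embedding=torch.randn(2, 768),            # pooled text embedding
#       encoder_hidden_states=torch.randn(2, 77, 768), # per-token text embeddings
#   )
#   out.predicted_image_embedding.shape                # torch.Size([2, 768])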

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
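
# Usage sketch (added, illustrative only): the processor tokenizes text and/or
# preprocesses images in one call, merging both into a single encoding when given both.
#
#   processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#   batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   batch.keys()  # dict_keys(['input_ids', 'attention_mask', 'pixel_values'])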

import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
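
# Usage sketch (added, illustrative only): building the shared state for a flax
# scheduler and noising a sample at given timesteps.
#
#   state = CommonSchedulerState.create(scheduler)                   # scheduler: any flax scheduler
#   noisy = add_noise_common(state, clean_samples, noise, timesteps)
#   # noisy == sqrt(alpha_bar_t) * clean + sqrt(1 - alpha_bar_t) * noise, broadcast over the batch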

from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray by divide and conquer.

    >>> nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    >>> max_subarray(nums, 0, len(nums) - 1)
    (3, 6, 6)
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
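
# Comparison sketch (added, not part of the original file): Kadane's algorithm finds
# the same maximum sum in O(n) and is a handy cross-check for the divide-and-conquer
# version above.
def kadane_max_sum(arr: Sequence[float]) -> float:
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)  # either extend the run or start a new one at x
        best = max(best, current)
    return best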

from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
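
# Usage sketch (added, illustrative only): any object with a `process(sample) -> float`
# method satisfies the protocol; an identity filter produces a flat 0 dB magnitude response.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), 48_000)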

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the two halves of an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
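
# Illustrative (added, hypothetical output): the kind of mismatch check_all_inits()
# reports for a toy __init__.py where "FooModel" appears under TYPE_CHECKING but not
# in `_import_structure`:
#
#   Problem in src/transformers/__init__.py, both halves do not define the same objects.
#   Differences for base imports:
#     FooModel in TYPE_HINT but not in _import_structure.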
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = mask_ratio
lowerCamelCase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
# expected sequence length = num_patches
lowerCamelCase__ = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
lowerCamelCase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFViTMAEModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = outputs_dict[0].numpy()
lowerCamelCase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def UpperCamelCase_ ( self ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCAmelCase ):
lowerCamelCase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCAmelCase ):
lowerCamelCase__ = v.numpy()
else:
lowerCamelCase__ = np.array(_lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.constant(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ = tf_noise
super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCAmelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase )
}
lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ = main_layer_class(_lowerCAmelCase )
lowerCamelCase__ = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) )
lowerCamelCase__ = model(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" )
model.save(_lowerCAmelCase )
lowerCamelCase__ = tf.keras.models.load_model(
_lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCAmelCase ,tf.keras.Model )
lowerCamelCase__ = model(_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
    model = model_class(config )
    model_input = self._prepare_for_class(inputs_dict ,model_class )
    outputs = model(model_input ,noise=noise )
    if model_class.__name__ == "TFViTMAEModel":
        out_a = outputs.last_hidden_state.numpy()
        out_a[np.isnan(out_a )] = 0
    else:
        out_a = outputs.logits.numpy()
        out_a[np.isnan(out_a )] = 0
    with tempfile.TemporaryDirectory() as tmpdirname:
        model.save_pretrained(tmpdirname ,saved_model=False )
        model = model_class.from_pretrained(tmpdirname )
        after_outputs = model(model_input ,noise=noise )
        if model_class.__name__ == "TFViTMAEModel":
            out_b = after_outputs["""last_hidden_state"""].numpy()
            out_b[np.isnan(out_b )] = 0
        else:
            out_b = after_outputs["""logits"""].numpy()
            out_b[np.isnan(out_b )] = 0
        max_diff = np.amax(np.abs(out_a - out_b ) )
        self.assertLessEqual(max_diff ,1E-5 )
def UpperCamelCase_ ( self ):
# make mask reproducible
np.random.seed(2 )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
    model = model_class(config )
    model_input = self._prepare_for_class(inputs_dict ,model_class )
    outputs = model(model_input ,noise=noise )
    model_config = model.get_config()
    # make sure that returned config is jsonifiable, which is required by keras
    json.dumps(model_config )
    new_model = model_class.from_config(model.get_config() )
    # make sure it also accepts a normal config
    new_model = model_class.from_config(model.config )
    new_outputs = new_model(model_input ) # Build model
    new_model.set_weights(model.get_weights() )
    new_outputs = new_model(model_input ,noise=noise )
    self.assert_outputs_same(outputs ,new_outputs )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCamelCase_ ( self ):
pass
@slow
def UpperCamelCase_ ( self ):
model = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
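# Hedged sketch of how ViTMAE turns the uniform `noise` used throughout these tests
# into a patch mask (illustrative assumptions: the 0.75 mask ratio and all names
# below are ours, not the library's exact implementation). Patches are ranked by
# their noise value and only the lowest-noise fraction is kept visible.
def _random_masking_sketch(noise, mask_ratio=0.75):
    batch, num_patches = noise.shape
    num_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise, axis=1)  # ascending: the first num_keep are kept
    mask = np.ones((batch, num_patches))     # 1 = masked / removed
    for b in range(batch):
        mask[b, ids_shuffle[b, :num_keep]] = 0
    return mask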
@require_tf
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self ):
    return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
model = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image ,return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
noise = np.random.uniform(size=(1, num_patches) )
# forward pass
outputs = model(**inputs ,noise=noise )
# verify the logits
expected_shape = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape ,expected_shape )
expected_slice = tf.convert_to_tensor(
    [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,expected_slice ,atol=1E-4 )
| 9
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
    # Mask token behave like a normal word, i.e. include the space before it
    mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    super().__init__(
        bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
    self.vocab_file = vocab_file
    self.monolingual_vocab_file = monolingual_vocab_file
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(str(vocab_file ) )
    # Load the reduced vocab
    # Keep order of special tokens for backward compatibility
    self.fairseq_tokens_to_ids = {}
    cnt = 0
    for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
        if str(token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(token )] = cnt
            cnt += 1
    with open(monolingual_vocab_file , 'r' , encoding='utf-8' ) as f:
        for line in f.readlines():
            token = line.strip().split()[0]
            self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
    if str(mask_token ) not in self.fairseq_tokens_to_ids:
        self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
    self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
    state = self.__dict__.copy()
    state['sp_model'] = None
    state['sp_model_proto'] = self.sp_model.serialized_model_proto()
    return state
def __setstate__( self , d ) -> Tuple:
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , 'sp_model_kwargs' ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0 )) + [1]
    return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def vocab_size( self ) -> Optional[Any]:
    return len(self.fairseq_ids_to_tokens )
def get_vocab( self ) -> Optional[int]:
    vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
    vocab.update(self.added_tokens_encoder )
    return vocab
def _tokenize( self , text ) -> List[str]:
    return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ) -> int:
    if token in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[token]
    else:
        return self.unk_token_id
def _convert_id_to_token( self , index ) -> Optional[Any]:
    return self.fairseq_ids_to_tokens[index]
def convert_tokens_to_string( self , tokens ) -> Tuple:
    out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
    return out_string
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    if not os.path.isdir(save_directory ):
        logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    out_monolingual_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
        out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
        copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
    elif not os.path.isfile(self.monolingual_vocab_file ):
        with open(out_monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
            for token in self.fairseq_tokens_to_ids:
                if token not in self.all_special_tokens:
                    fp.write(F'''{str(token )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
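# Hedged illustration (not part of the tokenizer class above): the special-token
# layout built by build_inputs_with_special_tokens follows the RoBERTa/BART
# convention, <s> A </s> for one sequence and <s> A </s></s> B </s> for a pair.
# A minimal sketch with assumed ids cls=0, sep=2:
def _special_token_layout_sketch(ids_a, ids_b=None, cls_id=0, sep_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]
# _special_token_layout_sketch([5, 6])   -> [0, 5, 6, 2]
# _special_token_layout_sketch([5], [7]) -> [0, 5, 2, 2, 7, 2]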
| 120
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> str:
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs( self ) -> Dict:
    input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        choice_labels = ids_tensor([self.batch_size] , self.num_choices )
    config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ) -> List[Any]:
    return LlamaConfig(
        vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
    model = LlamaModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask )
    result = model(input_ids )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Any:
    config.add_cross_attention = True
    model = LlamaModel(config )
    model.to(torch_device )
    model.eval()
    result = model(
        input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
    result = model(
        input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
    result = model(input_ids , attention_mask=input_mask )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Dict:
    model = LlamaForCausalLM(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask , labels=token_labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> str:
    config.is_decoder = True
    config.add_cross_attention = True
    model = LlamaForCausalLM(config=config )
    model.to(torch_device )
    model.eval()
    # first forward pass
    outputs = model(
        input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
    past_key_values = outputs.past_key_values
    # create hypothetical multiple next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
    next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
    # append to next input_ids and
    next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
    next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
    output_from_no_past = model(
        next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
    output_from_past = model(
        next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
    # select random slice
    random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
def prepare_config_and_inputs_for_common( self ) -> Tuple:
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
def setUp( self ) -> str:
    self.model_tester = LlamaModelTester(self )
    self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=3_7 )
def __UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Union[str, Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self ) -> Optional[Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
    config_and_inputs[0].position_embedding_type = type
    self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self ) -> List[Any]:
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
model = LlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> List[str]:
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = 'single_label_classification'
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
model = LlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> Tuple:
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = 'multi_label_classification'
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor(
    [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
model = LlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def __UpperCamelCase ( self ) -> Optional[int]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def test_model_rope_scaling( self , scaling_type ) -> List[str]:
    config , _ = self.model_tester.prepare_config_and_inputs_for_common()
    short_input = ids_tensor([1, 1_0] , config.vocab_size )
    long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
    set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
    original_model = LlamaModel(config )
    original_model.to(torch_device )
    original_model.eval()
    original_short_output = original_model(short_input ).last_hidden_state
    original_long_output = original_model(long_input ).last_hidden_state
    set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
    config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
    scaled_model = LlamaModel(config )
    scaled_model.to(torch_device )
    scaled_model.eval()
    scaled_short_output = scaled_model(short_input ).last_hidden_state
    scaled_long_output = scaled_model(long_input ).last_hidden_state
    # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
    # maximum sequence length, so the outputs for the short input should match.
    if scaling_type == "dynamic":
        self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
    else:
        self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
    # The output should be different for long inputs
    self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
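# Hedged sketch of the two scaling strategies exercised above (illustrative, not
# the exact transformers implementation): "linear" divides position indices by the
# factor for every input, while "dynamic" (NTK-style) only enlarges the rotary base
# once the sequence exceeds the original maximum, leaving short inputs untouched.
def _dynamic_rope_base_sketch(base, seq_len, max_pos, factor, dim):
    if seq_len <= max_pos:
        return base  # short inputs unchanged, hence the allclose assertion above
    scale = (factor * seq_len / max_pos) - (factor - 1)
    return base * scale ** (dim / (dim - 2))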
@require_torch
class a ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> int:
input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
out = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> Optional[int]:
input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
out = model(torch.tensor(input_ids ) )
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> Any:
input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
out = model(torch.tensor(input_ids ) )
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
@unittest.skip(
    'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def __UpperCamelCase ( self ) -> Dict:
input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
out = model(torch.tensor(input_ids ) )
EXPECTED_MEAN = torch.tensor(
    [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
# fmt: off
EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Model is currently gated' )
@slow
def __UpperCamelCase ( self ) -> Optional[Any]:
EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
prompt = 'Simply put, the theory of relativity states that '
tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
input_ids = tokenizer.encode(prompt , return_tensors='pt' )
model = LlamaForCausalLM.from_pretrained(
    'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=False )
# greedy generation outputs
generated_ids = model.generate(input_ids , max_new_tokens=6_4 , top_p=None , temperature=1 , do_sample=False )
text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
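# Hedged sketch of greedy decoding as exercised above (illustrative, not the
# transformers generate() implementation): repeatedly append the argmax token.
def _greedy_decode_sketch(model, input_ids, max_new_tokens):
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits                          # (batch, seq, vocab)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids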
| 120
| 1
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
A_ :Union[str, Any] = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
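# Hedged sketch (not the actual Distiller class): the alpha_* weights validated
# above scale a linear combination of losses, and `temperature` softens both
# distributions for the distillation term, as in Hinton et al. (2015). The helper
# name below is ours.
import torch.nn.functional as F
def _distillation_ce_sketch(student_logits, teacher_logits, temperature=2.0):
    # KL divergence between temperature-softened distributions, scaled by T^2
    s = F.log_softmax(student_logits / temperature, dim=-1)
    t = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(s, t, reduction="batchmean") * temperature**2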
def freeze_pos_embeddings(student ,args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student ,args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' ,action='store_true' ,help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' ,type=a_ ,required=a_ ,help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' ,type=a_ ,required=a_ ,help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' ,)
parser.add_argument(
'--student_type' ,type=a_ ,choices=['distilbert', 'roberta', 'gpt2'] ,required=a_ ,help='The student type (DistilBERT, RoBERTa).' ,)
parser.add_argument('--student_config' ,type=a_ ,required=a_ ,help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' ,default=a_ ,type=a_ ,help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' ,choices=['bert', 'roberta', 'gpt2'] ,required=a_ ,help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' ,type=a_ ,required=a_ ,help='The teacher model.' )
parser.add_argument('--temperature' ,default=2.0 ,type=a_ ,help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' ,default=0.5 ,type=a_ ,help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' ,default=0.0 ,type=a_ ,help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' ,)
parser.add_argument('--alpha_clm' ,default=0.5 ,type=a_ ,help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' ,default=0.0 ,type=a_ ,help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' ,default=0.0 ,type=a_ ,help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' ,action='store_true' ,help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' ,default=0.15 ,type=a_ ,help='Proportion of tokens for which we need to make a prediction.' ,)
parser.add_argument('--word_mask' ,default=0.8 ,type=a_ ,help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' ,default=0.1 ,type=a_ ,help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' ,default=0.1 ,type=a_ ,help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' ,default=0.7 ,type=a_ ,help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' ,)
parser.add_argument('--token_counts' ,type=a_ ,help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' ,action='store_true' ,help='If true, compute the distillation loss only the [MLM] prediction distribution.' ,)
parser.add_argument(
'--freeze_pos_embs' ,action='store_true' ,help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' ,)
parser.add_argument(
'--freeze_token_type_embds' ,action='store_true' ,help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' ,)
parser.add_argument('--n_epoch' ,type=a_ ,default=3 ,help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' ,type=a_ ,default=5 ,help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' ,action='store_false' ,help='If true, group sequences that have similar length into the same batch. Default is true.' ,)
parser.add_argument(
'--gradient_accumulation_steps' ,type=a_ ,default=50 ,help='Gradient accumulation for larger training batches.' ,)
parser.add_argument('--warmup_prop' ,default=0.05 ,type=a_ ,help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' ,default=0.0 ,type=a_ ,help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' ,default=5e-4 ,type=a_ ,help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' ,default=1e-6 ,type=a_ ,help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' ,default=5.0 ,type=a_ ,help='Max gradient norm.' )
parser.add_argument('--initializer_range' ,default=0.02 ,type=a_ ,help='Random initialization range.' )
parser.add_argument(
'--fp16' ,action='store_true' ,help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' ,)
parser.add_argument(
'--fp16_opt_level' ,type=a_ ,default='O1' ,help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) ,)
parser.add_argument('--n_gpu' ,type=a_ ,default=1 ,help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' ,type=a_ ,default=-1 ,help='Distributed training - Local rank' )
parser.add_argument('--seed' ,type=a_ ,default=56 ,help='Random seed' )
parser.add_argument('--log_interval' ,type=a_ ,default=500 ,help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' ,type=a_ ,default=4_000 ,help='Checkpoint interval.' )
args = parser.parse_args()
sanity_checks(args )
# ARGS #
init_gpu_params(args )
set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
    F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
    ' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path ,'parameters.json' ) ,'w' ) as f:
    json.dump(vars(args ) ,f ,indent=4 )
git_log(args.dump_path )
student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
special_tok_ids = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
    idx = tokenizer.all_special_tokens.index(tok_symbol )
    special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
logger.info(F'Special tokens {special_tok_ids}' )
args.special_tok_ids = special_tok_ids
args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
with open(args.data_file ,'rb' ) as fp:
    data = pickle.load(fp )
if args.mlm:
    logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' )
    with open(args.token_counts ,'rb' ) as fp:
        counts = pickle.load(fp )
    token_probs = np.maximum(counts ,1 ) ** -args.mlm_smoothing
    for idx in special_tok_ids.values():
        token_probs[idx] = 0.0 # do not predict special tokens
    token_probs = torch.from_numpy(token_probs )
else:
    token_probs = None
train_lm_seq_dataset = LmSeqsDataset(params=args ,data=data )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
stu_architecture_config = student_config_class.from_pretrained(args.student_config )
stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
    logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
    student = student_model_class.from_pretrained(args.student_pretrained_weights ,config=stu_architecture_config )
else:
    student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
    student.to(F'cuda:{args.local_rank}' )
logger.info('Student loaded.' )
# TEACHER #
teacher = teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=True )
if args.n_gpu > 0:
    teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
    freeze_pos_embeddings(student ,args )
if args.freeze_token_type_embds:
    freeze_token_type_embeddings(student ,args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
distiller = Distiller(
    params=args ,dataset=train_lm_seq_dataset ,token_probs=token_probs ,student=student ,teacher=teacher )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 154
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification(TaskTemplate ):
    """simple docstring"""
    task: str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""image""": Image()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template
    @property
    def column_mapping( self ):
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
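# Hedged usage sketch (dataset/label names assumed, not taken from a real dataset):
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now carries the concrete ClassLabel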
| 154
| 1
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(pattern.search(phone))
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
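# Hedged examples of what the pattern above accepts (read off the regex, so the
# exact operator-digit policy is an assumption about the intent):
#   is_sri_lankan_phone_number("0702343221")    -> True   (local "0" prefix)
#   is_sri_lankan_phone_number("+94712345678")  -> True   ("+94" prefix)
#   is_sri_lankan_phone_number("0094702343221") -> True   ("0094" prefix)
#   is_sri_lankan_phone_number("0731234567")    -> False  ("3" is not an accepted second digit)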
| 515
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs)
    if save_path is not None:
        save_json(metrics , save_path , indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
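# Hedged usage sketch (file names assumed): `fire` exposes calculate_rouge_path as
# a CLI, so a typical invocation would be
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json
# with any extra flags forwarded to calculate_rouge via **kwargs.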
| 515
| 1
|
'''simple docstring'''
import numpy as np
import datasets
A_ : List[Any] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
A_ : Any = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
A_ : str = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def _compute( self , X , reference_distribution ):
    # convert to numpy arrays
    X = np.array(X )
    reference_distribution = np.array(reference_distribution )
    # Assert that arrays are 2D
    if len(X.shape ) != 2:
        raise ValueError("""Expected `X` to be a 2D vector""" )
    if len(reference_distribution.shape ) != 2:
        raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
    if reference_distribution.shape[0] < 2:
        raise ValueError(
            """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
    # Get mahalanobis distance for each prediction
    X_minus_mu = X - np.mean(reference_distribution )
    cov = np.cov(reference_distribution.T )
    try:
        inv_covmat = np.linalg.inv(cov )
    except np.linalg.LinAlgError:
        inv_covmat = np.linalg.pinv(cov )
    left_term = np.dot(X_minus_mu , inv_covmat )
    mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
    return {"mahalanobis": mahal_dist}
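# Hedged standalone check of the computation above (helper name is ours; values
# mirror the docstring example): for reference points [[0, 1], [1, 0]] the
# covariance is singular, so the pseudo-inverse branch is taken, and the distance
# of X = [[0, 1]] comes out as 0.5.
def _mahalanobis_sketch(X, reference_distribution):
    X = np.array(X)
    ref = np.array(reference_distribution)
    delta = X - np.mean(ref)                 # centre on the scalar mean, as above
    inv_cov = np.linalg.pinv(np.cov(ref.T))  # pseudo-inverse handles singular covariance
    return np.dot(np.dot(delta, inv_cov), delta.T).diagonal()
# _mahalanobis_sketch([[0, 1]], [[0, 1], [1, 0]]) -> array([0.5])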
| 706
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> int:
    '''simple docstring'''
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["""model_config"""] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""module"""]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab["""[MASK2]"""] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__magic_name__ )
with open(os.path.join(pytorch_dump_folder_path , """tokenizer_config.json""" ) , """r""" ) as f:
    tokenizer_config = json.load(f )
tokenizer_config["""tokenizer_class"""] = """MLukeTokenizer"""
with open(os.path.join(pytorch_dump_folder_path , """tokenizer_config.json""" ) , """w""" ) as f:
    json.dump(tokenizer_config , f )
with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
    json.dump(entity_vocab , f )
tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
ent_init_index = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
enta_init_index = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
word_emb = state_dict["""embeddings.word_embeddings.weight"""]
ent_emb = word_emb[ent_init_index].unsqueeze(0 )
enta_emb = word_emb[enta_init_index].unsqueeze(0 )
state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
    decoder_bias = state_dict[bias_name]
    ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
    enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
    state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case__ : Union[str, Any] = f"encoder.layer.{layer_index}.attention.self."
snake_case__ : Tuple = state_dict[prefix + matrix_name]
snake_case__ : str = state_dict[prefix + matrix_name]
snake_case__ : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case__ : Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
snake_case__ : Union[str, Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
snake_case__ : List[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case__ : Optional[Any] = state_dict["""entity_predictions.bias"""]
snake_case__ : Optional[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
snake_case__ : Any = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case__ : int = LukeForMaskedLM(config=__magic_name__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
snake_case__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
snake_case__ : Optional[Any] = state_dict[key]
else:
snake_case__ : Optional[int] = state_dict[key]
snake_case__ , snake_case__ : Any = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
if set(__magic_name__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__magic_name__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="""entity_classification""" )
text = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
span = (0, 9)
encoding = tokenizer(text , entity_spans=[span] , return_tensors="""pt""" )
outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
    raise NotImplementedError
else: # base
    expected_shape = torch.Size((1, 33, 7_68) )
    expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
    raise ValueError(
        f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
    raise ValueError
# Verify entity hidden states
if model_size == "large":
    raise NotImplementedError
else: # base
    expected_shape = torch.Size((1, 1, 7_68) )
    expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
    raise ValueError(
        f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
        f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
    raise ValueError
# Verify masked word/entity prediction
tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
text = """Tokyo is the capital of <mask>."""
span = (24, 30)
encoding = tokenizer(text , entity_spans=[span] , return_tensors="""pt""" )
outputs = model(**encoding )
input_ids = encoding["""input_ids"""][0].tolist()
mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(predicted_id )
predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
multilingual_predicted_entities = [
    entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path ) )
model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab(entity_vocab_path ) -> Optional[int]:
    '''simple docstring'''
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"{language}:{entity_name}"
            new_mapping[new_entity_name] = entity_id
return new_mapping
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
A_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
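# Hedged sketch of the embedding-extension pattern used in the conversion above
# (toy sizes; row indices 3 and 7 stand in for the "@" / "#" token ids):
def _extend_embedding_sketch():
    emb = torch.randn(10, 4)               # toy embedding matrix: vocab 10, dim 4
    new_rows = emb[[3, 7]]                 # rows copied from existing token ids
    extended = torch.cat([emb, new_rows])  # two new rows appended at the end
    assert extended.shape == (12, 4)
    return extended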
| 419
| 0
|
"""simple docstring"""
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
    main()
| 308
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ): # picklable for multiprocessing
    return x.sum()
def add_one(i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase ):
def _lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Any = {}
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[Any] = 1
lowerCAmelCase__ : Tuple = [1, 2]
lowerCAmelCase__ : Optional[int] = {"a": 1, "b": 2}
lowerCAmelCase__ : int = {"a": [1, 2], "b": [3, 4]}
lowerCAmelCase__ : Union[str, Any] = {"a": {"1": 1}, "b": 2}
lowerCAmelCase__ : Dict = {"a": 1, "b": 2, "c": 3, "d": 4}
lowerCAmelCase__ : Any = {}
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Optional[Any] = 2
lowerCAmelCase__ : Optional[Any] = [2, 3]
lowerCAmelCase__ : List[Any] = {"a": 2, "b": 3}
lowerCAmelCase__ : Optional[int] = {"a": [2, 3], "b": [4, 5]}
lowerCAmelCase__ : Dict = {"a": {"1": 2}, "b": 3}
lowerCAmelCase__ : Tuple = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
lowerCAmelCase__ : Any = 2
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
lowerCAmelCase__ : Optional[int] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
lowerCAmelCase__ : Union[str, Any] = {"a": 2, "b": 0, "c": 2}
lowerCAmelCase__ : str = {
"a": np.eye(2 ).astype(_lowercase ),
"b": np.zeros(3 ).astype(_lowercase ),
"c": np.ones(2 ).astype(_lowercase ),
}
self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase ) , _lowercase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowercase ): # can't pickle a local lambda
map_nested(lambda _lowercase : x + 1 , _lowercase , num_proc=_lowercase )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def lowercase__ ( lowerCamelCase : Dict ) -> Tuple:
lowerCAmelCase__ : Union[str, Any] = NestedDataStructure(lowerCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def lowercase__ ( lowerCamelCase : Optional[int] , lowerCamelCase : str ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = NestedDataStructure(lowerCamelCase ).flatten()
assert output == expected_output
def lowercase__ ( ) -> List[str]:
lowerCAmelCase__ : Tuple = A(x=1 , y="foobar" )
lowerCAmelCase__ : Dict = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase ) == expected_output
lowerCAmelCase__ : List[str] = {"a": {"b": A(x=1_0 , y="foo" )}, "c": [A(x=2_0 , y="bar" )]}
lowerCAmelCase__ : Union[str, Any] = {"a": {"b": {"x": 1_0, "y": "foo"}}, "c": [{"x": 2_0, "y": "bar"}]}
assert asdict(lowerCamelCase ) == expected_output
with pytest.raises(lowerCamelCase ):
asdict([1, A(x=1_0 , y="foo" )] )
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
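# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the test suite above): a
# minimal example of what `map_nested` does, mirroring `test_map_nested`.
# Only the public `datasets.utils.py_utils.map_nested` import used above is
# assumed.
#
#     nested = {"a": [1, 2], "b": {"c": 3}}
#     map_nested(lambda v: v * 10, nested)
#     # -> {"a": [10, 20], "b": {"c": 30}}
#
# With `num_proc=2` and `parallel_min_length=16`, the same call only spawns
# worker processes once the top-level structure has at least 16 items.
# ---------------------------------------------------------------------------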
| 308
| 1
|
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
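        # Worked example with the tester defaults above (values assumed from this
        # tester, not from the ViTMAE paper): image_size=30, patch_size=2 gives
        # num_patches = 15**2 = 225, so seq_length = ceil(0.4 * 226) = 91 visible tokens.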
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
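# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the tests above): running
# ViTMAE outside the test harness. The checkpoint name matches the integration
# test; the image path is an assumption.
#
#     from PIL import Image
#     import torch
#     from transformers import ViTImageProcessor, ViTMAEForPreTraining
#
#     processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
#     model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # outputs.logits: (1, 196, 768) per-patch pixel predictions;
#     # outputs.mask marks which patches were hidden from the encoder.
# ---------------------------------------------------------------------------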
| 256
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list) -> Node:
    """Creates a linked list from the elements of the given list and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
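# ---------------------------------------------------------------------------
# Hedged note (an addition, not from the original file): `print_reverse` is
# recursive, so very long lists can hit Python's recursion limit. An iterative
# sketch with an explicit stack behaves the same without that limit:
#
#     def print_reverse_iterative(head_node: Node) -> None:
#         stack = []
#         while head_node:
#             stack.append(head_node.data)
#             head_node = head_node.next
#         while stack:
#             print(stack.pop())
# ---------------------------------------------------------------------------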
| 256
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
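# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, mirroring the tests above with the same
# checkpoint): TvltProcessor wraps an image processor and an audio feature
# extractor behind one call.
#
#     import numpy as np
#     from transformers import TvltProcessor
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#     # inputs keys: audio_values, audio_mask, pixel_values, pixel_mask
# ---------------------------------------------------------------------------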
| 591
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 447
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCamelCase_ : str = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
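# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition; the checkpoint name is taken from the
# archive list at the top of this file):
#
#     from PIL import Image
#     import torch
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=Image.open("street.png"), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits  # (batch, num_labels, height, width)
#     segmentation = logits.argmax(dim=1)
# ---------------------------------------------------------------------------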
| 345
|
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
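# ---------------------------------------------------------------------------
# Hedged worked example (an addition): converting 4 kilometers to meters
# multiplies by 10**(3 - 0), so:
#
#     length_conversion(4, "kilometer", "meter")  # -> 4000.0
#     length_conversion(1, "meter", "km")         # -> 0.001
# ---------------------------------------------------------------------------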
| 345
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."]
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ ={"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,  # the literal dict defined above
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
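# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): how the target-language prefix tested
# above is used in practice. The checkpoint name matches the test class.
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#     tokenizer.tgt_lang = "fr"
#     ids = tokenizer("C'est trop cool").input_ids
#     # ids[0] is the French language code; ids[-1] is the EOS token id.
# ---------------------------------------------------------------------------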
| 54
|
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: return the maximum profit achievable within max_weight."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as used

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
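# ---------------------------------------------------------------------------
# Hedged worked example (an addition): with profit = [10, 9, 8],
# weight = [3, 4, 5] and max_weight = 5, the greedy pass takes all of item 0
# (ratio 10/3), then 2/4 of item 1 (ratio 9/4), giving 10 + 4.5 = 14.5:
#
#     calc_profit([10, 9, 8], [3, 4, 5], 5)  # -> 14.5
# ---------------------------------------------------------------------------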
| 110
| 0
|
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root lies between a and b only if
    # equation(a) and equation(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
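# ---------------------------------------------------------------------------
# Hedged note (an addition): equation(x) = 10 - x**2 has roots at ±sqrt(10)
# ≈ ±3.1623. Both calls above bracket the positive root, so each converges
# (within the 0.01 tolerance) to approximately 3.162.
# ---------------------------------------------------------------------------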
| 705
|
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
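# A quick illustration of the three pattern forms above (a sketch; the keys are
# typical fairseq state_dict names, assuming the IGNORE_KEYS_S2T list defined
# earlier in this file):
#
#   should_ignore("text_encoder_prenet.embed_tokens.weight", IGNORE_KEYS_S2T)    # True: "text_encoder_prenet.*" prefix match
#   should_ignore("encoder.layers.0.norm_k.weight", IGNORE_KEYS_S2T)             # True: "encoder.layers.*.norm_k.weight" wildcard match
#   should_ignore("encoder.layers.0.self_attn.k_proj.weight", IGNORE_KEYS_S2T)   # False: no pattern applies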
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}')

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('.*.')
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}')

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
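# Example invocation (a sketch; the file name and the checkpoint/SentencePiece
# paths below are hypothetical placeholders, not artifacts shipped with this
# script):
#
#   python convert_speecht5_original_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted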
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = """ Hello world! cécé herlolip"""

mnli_rename_keys = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=None)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
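# A worked example of the greedy selection above (a sketch):
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   value = [80, 100, 60]
#   weight = [40, 10, 10]
#   foods = build_menu(food, value, weight)
#   # Sorting by value descending picks Pizza (100), Burger (80), Coca Cola (60);
#   # with max_cost=60 all three fit, for a total value of 240.0.
#   chosen, total_value = greedy(foods, 60, Things.get_value)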
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
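# How the "*" wildcard in MAPPING is resolved downstream (a sketch): a fairseq
# key such as "encoder.layers.3.fc1.weight" matches the "fc1" entry, the layer
# index "3" is recovered from the key, and the HF target becomes
# "encoder.layers.3.feed_forward.intermediate_dense" with weight_type "weight".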
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
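# A usage sketch (assuming the "allegro/herbert-base-cased" checkpoint listed
# above; the token ids are illustrative):
#
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
#   # -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]
#   tokenizer.get_special_tokens_mask([10, 11], [20, 21])
#   # -> [1, 0, 0, 1, 0, 0, 1]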
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
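# A worked example of the parsing above (a sketch): given a pytest summary such
# as "2 failed, 98 passed in 1:02:03", the numbers preceding "failed" and
# "passed" are accumulated, so handle_test_results returns (2, 98, "1:02:03").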
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self.thread_ts = None
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
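# Example invocation (a sketch; the checkpoint and config paths below are
# hypothetical placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin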
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
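# A minimal subclass sketch (a hypothetical command, not part of the original
# file): each concrete command registers its own subparser and implements run().
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")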
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
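# A worked example of the MultiRC scoring above (a sketch): two answers that
# belong to the same (paragraph, question) pair are grouped under one key such
# as "0-0"; the per-question macro-F1 values are averaged into f1_m, exact
# match requires every answer of a question to be correct, and f1_a is the
# plain F1 over all answers pooled together.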
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def test_env_var_device(self):
        """Tests that setting `ACCELERATE_TORCH_DEVICE` overrides the default device."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
self.assertTrue(dummy_obj is None )
    def test_is_accelerate_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )

        self.assertEqual(getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should not have `_is_accelerate_prepared` set to `True`")
        self.assertEqual(getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerate_prepared` or is set to `False`")
        self.assertEqual(getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerate_prepared` or is set to `False`")
        self.assertEqual(getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerate_prepared` or is set to `False`")
        self.assertEqual(getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`")
        self.assertEqual(getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`")
@slow
@require_bnb
    def test_accelerator_bnb(self):
        """A test that loads an 8-bit quantized model and prepares it with Accelerator."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing an 8-bit model with CPU-offloaded weights should raise."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing an 8-bit model split across GPUs should raise under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_device(self):
        """An 8-bit model dispatched over two GPUs can be prepared."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
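
# Minimal usage sketch (illustrative, not a test) of the training pattern the
# tests above exercise; all names below are local to this sketch.
#
#     accelerator = Accelerator()
#     model = torch.nn.Linear(2, 4)
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     data = DataLoader(TensorDataset(torch.randn(8, 2), torch.randn(8, 4)), batch_size=4)
#     model, optimizer, data = accelerator.prepare(model, optimizer, data)
#     for x, y in data:
#         optimizer.zero_grad()
#         loss = torch.nn.functional.mse_loss(model(x), y)
#         accelerator.backward(loss)   # replaces loss.backward()
#         optimizer.step()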
| 705
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> M2M100Tokenizer:
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('Skip this test while all models are still to be uploaded.' )
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Tuple = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e"
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
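
        # For reference: shift_tokens_right rotates the labels one position to the
        # right so decoding starts from EOS (id 2 for this checkpoint), e.g.
        #   labels:            [FR_CODE, tok_1, ..., tok_n, EOS]
        #   decoder_input_ids: [EOS, FR_CODE, tok_1, ..., tok_n]
        # which is exactly what the `[2, FR_CODE]` assertion above checks.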
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 577
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
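

# Illustrative usage sketch: the default configuration roughly matches Dinat-Mini.
#
#     config = DinatConfig()
#     config.num_layers   # -> 4, one entry per stage in `depths`
#     config.hidden_size  # -> 64 * 2 ** 3 = 512, channel dim after the last stage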
| 204
|
def UpperCamelCase ( __lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
snake_case : Dict = set()
# Replace all the whitespace in our sentence
snake_case : List[Any] = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowerCamelCase ) == 26
def UpperCamelCase ( __lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
snake_case : Optional[int] = [False] * 26
for char in input_str:
if char.islower():
snake_case : Dict = True
elif char.isupper():
snake_case : Optional[Any] = True
return all(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
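

# Example usage of the three predicates (doctest-style; all three agree):
# >>> is_pangram("Waltz, bad nymph, for quick jigs vex")
# True
# >>> is_pangram_faster("My name is Unknown")
# False
# >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog")
# True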
def benchmark() -> None:
    """Time the three implementations with ``timeit``."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 204
| 1
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
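

# Quick sanity check (doctest-style):
# >>> is_prime(11)
# True
# >>> is_prime(561)  # 3 * 11 * 17, a Carmichael number
# False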
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as prime + 2 * i * i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that violates Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 712
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: maximum value achievable from item ``index`` onward."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        # Option 2: take the current item if it still fits.
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
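

# Example: with weights [1, 3, 4, 5] and values [1, 4, 5, 7], a capacity of 7
# is best filled by the weight-3 and weight-4 items, for a total value of 9.
# >>> knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0)
# 9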
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast BERT tokenizer backed by the HuggingFace *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
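

# Illustrative usage sketch (needs network access to fetch the vocabulary):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     encoded = tokenizer("Hello world", "Second sentence")
#     # input_ids start with [CLS] and separate the pair with [SEP], while
#     # token_type_ids mark the second sentence with 1s, matching
#     # build_inputs_with_special_tokens / create_token_type_ids_from_sequences above.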
| 433
|
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between any two units listed in ``ENERGY_CONVERSION``."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
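

# Example conversions (doctest-style):
# >>> energy_conversion("joule", "kilojoule", 1000)
# 1.0
# >>> energy_conversion("kilowatthour", "joule", 1)
# 3600000.0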
if __name__ == "__main__":
import doctest
doctest.testmod()
| 433
| 1
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 445
|
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number`` up to ``number_of_terms`` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
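
# Example output:
# >>> print(multiplication_table(number=3, number_of_terms=3))
# 3 * 1 = 3
# 3 * 2 = 6
# 3 * 3 = 9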
| 445
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display ``image`` with matplotlib, hiding the axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
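

# Illustrative usage sketch of the helpers above:
#
#     model = torch.nn.Linear(4, 4)
#     freeze_module(model)    # model.weight.requires_grad is now False
#     device = get_device()   # "cuda", "mps", or "cpu"
#     model = model.to(device)
#     print(get_timestamp())  # e.g. "14:03:27"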
| 32
|
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
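

# From the Project Euler 29 statement: for 2 <= a, b <= 5 there are
# 15 distinct terms, which this implementation reproduces:
# >>> solution(5)
# 15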
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 348
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize ``image`` to the ``height``/``width`` given in ``size``."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess a batch of images: optional RGB conversion, resize, rescale, normalize."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
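

# Illustrative usage sketch (assumes PIL and network access for the sample image):
#
#     from PIL import Image
#     import requests
#
#     processor = BlipImageProcessor()
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(images=image, return_tensors="pt")
#     # inputs["pixel_values"].shape -> (1, 3, 384, 384) after resize/rescale/normalize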
| 716
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
FAIRSEQ_LANGUAGE_CODES = _snake_case  # restore the intended name for the language-code list above


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang = None , tgt_lang = None , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
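# Illustrative usage sketch (assumed checkpoint name; not part of the original file):
# tokenizer = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# inputs = tokenizer("Hello world", return_tensors="pt")
# The source language code and EOS are placed around the token ids according to
# `legacy_behaviour`, and `_build_translation_inputs` adds `forced_bos_token_id`
# so generation starts in the target language.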
| 659
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 219
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["image_processing_layoutlmv2"] = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 615
| 0
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap :
def __init__( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        self.node_position = []
    def get_position( self , vertex ):
        """simple docstring"""
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        """simple docstring"""
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        """simple docstring"""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        """simple docstring"""
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm (adjacency_list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[neighbor] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
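    # Illustrative programmatic example (assumed graph, bypassing the stdin prompts above):
    # graph = defaultdict(list)
    # for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    #     graph[u].append([v, w])
    #     graph[v].append([u, w])
    # prisms_algorithm(graph) would return the MST edges [(0, 1), (1, 2)].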
| 377
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 377
| 1
|
from __future__ import annotations
def fractional_knapsack ( value , weight , capacity ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
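    # Illustrative worked example (assumed values, not from the original file): with
    # values [60, 100, 120], weights [10, 20, 30] and capacity 50, the greedy ratio
    # order takes items 0 and 1 whole and 20/30 of item 2, giving
    # 60 + 100 + 120 * (20 / 30) = 240.0 with fractions [1, 1, 2/3].
    print(fractional_knapsack([60, 100, 120] , [10, 20, 30] , 50 ))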
| 9
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)
def string_to_bool ( v ) -> bool:
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function ( choices ) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser ( ArgumentParser ):
    """simple docstring"""
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field( parser , field ):
        """simple docstring"""
        field_name = F'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(types , 'UnionType' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '__origin__' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '__origin__' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F'''--no_{field.name}''' , action='store_false' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ):
        """simple docstring"""
        if hasattr(dtype , '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ):
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}''' )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ):
        """simple docstring"""
        with open(Path(json_file ) , encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
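# Illustrative usage sketch (hypothetical dataclass, not part of the original file):
# @dataclasses.dataclass
# class ExampleArguments:
#     learning_rate: float = dataclasses.field(default=1e-4, metadata={"help": "The learning rate."})
#     use_fp16: bool = False
#
# parser = HfArgumentParser(ExampleArguments)
# (example_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--use_fp16"])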
| 9
| 1
|
def upper ( word ) -> str:
    '''simple docstring'''
    return "".join(chr(ord(char ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
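    # Illustrative example (assumed input, not from the original file): ASCII lowercase
    # letters shift down by 32 code points, so this prints "HELLO WORLD".
    print(upper("hello World"))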
| 700
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree ( tree ) -> bool:
    '''simple docstring'''
    def is_valid_tree(node ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(tree ):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )
    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(tree , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
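    # Illustrative usage sketch (assumed values, not from the original file):
    # a valid BST rooted at 2.0 with left child 1.0 and right child 3.0.
    example_root = TreeNode(2.0 , TreeNode(1.0 ) , TreeNode(3.0 ) )
    print(is_binary_search_tree(example_root ))  # True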
| 576
| 0
|
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax ( nn.Module ):
'''simple docstring'''
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
else:
# construct weights and biases
            weights, biases = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob( self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
else:
# construct weights and biases
            weights, biases = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
return out
| 52
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly ( poly , x ) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner ( poly , x ) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
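    # Illustrative worked check (assumed arithmetic, matching the poly and x above): both
    # functions compute 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0.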
| 422
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class DPTFeatureExtractor ( DPTImageProcessor ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : str )-> None:
warnings.warn(
"""The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DPTImageProcessor instead.""" , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 517
|
'''simple docstring'''
from __future__ import annotations
def peak ( lst : list[int] ) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
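    # Illustrative usage example (assumed input, not from the original file):
    print(peak([1, 2, 3, 4, 5, 4, 3, 2, 1] ))  # the list rises then falls, so this prints 5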
| 517
| 1
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang , model_name ) -> str:
    texts = {
"en": "Machine learning is great, isn\'t it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(F'''Generating {path}''' )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 221
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_ ( state_dict ) -> None:
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys ( state_dict , expert_idx=None ) -> dict:
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        # note: the original check `if "fc2" and ...` was always truthy; the intent is a membership test
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.' )
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm' )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly ( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ) -> Union[str, Any]:
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['model']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 686
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig ( PretrainedConfig ):
__snake_case :str = 'swin2sr'
__snake_case :Dict = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 705
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
    def __init__( self , parent , batch_size = 13 , image_size = 64 , patch_size = 2 , embed_dim = 3 , num_channels = 3 , is_training = True , use_labels = True , hidden_size = 128 , hidden_sizes=[16, 32, 64, 128] , num_hidden_layers = 7 , num_attention_heads = 4 , intermediate_size = 37 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , type_sequence_label_size = 10 , initializer_range = 0.02 , encoder_stride = 2 , num_attention_outputs = 1 , dim = 128 , depths = [2, 2, 2, 2] , resolution = 2 , mlp_expansion_ratio = 2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFEfficientFormerModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple) )
                self.assertEqual(len(hidden_states ) , expected_num_layers )
                seq_len = getattr(self.model_tester , """seq_length""" , None )
                decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_len )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def test_for_masked_image_modeling( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , """seq_length""" , None )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_len )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
        chunk_length = getattr(self.model_tester , """chunk_length""" , None )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""use_cache"""] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
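

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original test file).
# It runs the same checkpoint the integration tests above exercise on the COCO
# fixture image. Assumes network access to the Hugging Face Hub so the
# "snap-research/efficientformer-l1-300" weights can be downloaded.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
    model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

    inputs = processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs, training=False).logits
    print("predicted class id:", int(tf.math.argmax(logits, axis=-1)[0]))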
| 53
| 0
|
"""Data2VecText configuration"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
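

# Usage sketch (added for illustration; not part of the original module): the standard
# PretrainedConfig round-trip with the class defined above.
if __name__ == "__main__":
    config = Data2VecTextConfig(hidden_size=512, num_hidden_layers=6, num_attention_heads=8)
    print(config.model_type)  # data2vec-text
    restored = Data2VecTextConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 512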
| 433
|
"""Tests for the ChineseCLIP processor."""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list of PIL images for the processor tests below.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
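

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original test file).
# It shows the end-to-end pattern the tests above verify, with a pretrained
# processor instead of the local fixtures; assumes network access to the Hub
# for the "OFA-Sys/chinese-clip-vit-base-patch16" checkpoint.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="np")
    print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids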
| 433
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
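

# Usage sketch (added for illustration; not part of the original module): the
# conv_kernel/num_conv_layers consistency check at the end of __init__ in action.
if __name__ == "__main__":
    config = MCTCTConfig()  # defaults are consistent
    print(config.model_type, config.num_conv_layers)
    try:
        MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # mismatch -> ValueError
    except ValueError as err:
        print(err)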
| 714
|
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
            base=16,
),
"generator": 2,
},
# 2048-bit
    14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
            base=16,
),
"generator": 2,
},
# 3072-bit
    15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
            base=16,
),
"generator": 2,
},
# 4096-bit
    16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
            base=16,
),
"generator": 2,
},
# 6144-bit
    17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
            base=16,
),
"generator": 2,
},
# 8192-bit
    18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
            base=16,
),
"generator": 2,
},
}
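# Sanity-check sketch (added for illustration; not part of the original module):
# each MODP group's prime really has the advertised bit length, e.g.
#     assert primes[14]["prime"].bit_length() == 2048
#     assert primes[18]["prime"].bit_length() == 8192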
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol
    """

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check that the remote public key is valid, following NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check that the remote public key is valid, following NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
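

def _example_exchange() -> None:
    # Usage sketch (added for illustration; not part of the original module):
    # a complete exchange in which both parties derive the identical shared key.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)

    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared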
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376
| 0
|