import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
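
# A minimal read-only sketch (hypothetical, not part of the script above) for
# inspecting what the bot would see; assumes GITHUB_TOKEN is exported with read
# access to the repository.
import os
from github import Github

g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
for issue in repo.get_issues(state="open")[:5]:  # PyGithub paginated lists support slicing
    print(issue.number, issue.title, [label.name for label in issue.get_labels()])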
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase )
# set absolute/relative position embeddings parameter
__UpperCAmelCase : Optional[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase )
elif task == "WTQ":
# run_task_main.py hparams
__UpperCAmelCase : Tuple = 4
__UpperCAmelCase : Any = True
# hparam_utils.py hparams
__UpperCAmelCase : Union[str, Any] = 0.664694
__UpperCAmelCase : Union[str, Any] = 0.207951
__UpperCAmelCase : int = 0.121194
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : List[str] = 0.0352513
__UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__UpperCAmelCase : int = 4
__UpperCAmelCase : Optional[int] = False
# hparam_utils.py hparams
__UpperCAmelCase : int = 36.4519
__UpperCAmelCase : str = 0.903421
__UpperCAmelCase : Dict = 222.088
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = 0.763141
__UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=UpperCamelCase )
elif task == "TABFACT":
__UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase )
elif task == "MLM":
__UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase )
elif task == "INTERMEDIATE_PRETRAINING":
__UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(UpperCamelCase )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
__UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(UpperCamelCase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
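
# A hedged sketch of calling the converter directly instead of via the CLI; all
# paths below are hypothetical placeholders, not from the script above.
convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="tapas_wtq/model.ckpt",        # hypothetical; vocab.txt is expected next to it
    tapas_config_file="tapas_wtq/tapas_config.json",  # hypothetical
    pytorch_dump_path="tapas_wtq_pytorch",            # hypothetical
)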
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
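
# A hedged usage sketch for the processor above ("facebook/flava-full" is the
# public FLAVA checkpoint; the blank image is just a stand-in input).
from PIL import Image

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
inputs = processor(images=Image.new("RGB", (224, 224)), text=["a photo of a cat"], return_tensors="pt")
print(sorted(inputs.keys()))  # pixel features from the image processor plus token ids from the tokenizer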
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests as above, but with spacy and ftfy installed."""

    pass
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor recursively."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the greatest common divisor iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Call the GCD functions on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(f"greatest_common_divisor({num_1}, {num_2}) = {greatest_common_divisor(num_1, num_2)}")
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
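
# A few sanity checks for the two implementations above (not in the original file):
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8
assert greatest_common_divisor(0, 5) == 5  # gcd(0, n) == n
assert gcd_by_iterative(-3, 9) == 3        # abs() keeps the result non-negative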
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
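
# A hedged illustration (assuming the standard `_LazyModule` semantics) of what
# this pattern buys: importing the package is cheap, and each submodule is only
# loaded when one of its names is first accessed.
import transformers.models.xglm as xglm

config = xglm.XGLMConfig()  # triggers the import of configuration_xglm only
# Accessing xglm.XGLMModel would, in turn, import the torch-backed modeling_xglm module.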
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
import math
import unittest


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
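
# Two extra spot checks of the 6k +/- 1 trial division above (not part of the test file):
assert is_prime(97)      # sqrt(97) < 10, so only i = 5 (and i + 2 = 7) is tried
assert not is_prime(91)  # 91 = 7 * 13 is caught by the i + 2 = 7 branch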
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self :Optional[int] , __magic_name__ :int , __magic_name__ :int=13 , __magic_name__ :Any=30 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=3 , __magic_name__ :Tuple=True , __magic_name__ :Optional[Any]=True , __magic_name__ :Union[str, Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :List[str]=4 , __magic_name__ :List[Any]=37 , __magic_name__ :str="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :str=0.1 , __magic_name__ :Optional[Any]=10 , __magic_name__ :int=0.02 , __magic_name__ :str=None , __magic_name__ :Optional[Any]=2 , ) -> int:
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = image_size
a__ = patch_size
a__ = num_channels
a__ = is_training
a__ = use_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = type_sequence_label_size
a__ = initializer_range
a__ = scope
a__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ = (image_size // patch_size) ** 2
a__ = num_patches + 1
def _UpperCamelCase ( self :List[Any] ) -> str:
'''simple docstring'''
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCamelCase ( self :Any , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[int] ) -> Any:
'''simple docstring'''
a__ = ViTModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self :Any , __magic_name__ :Dict , __magic_name__ :List[Any] , __magic_name__ :List[str] ) -> Optional[int]:
'''simple docstring'''
a__ = ViTForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a__ = 1
a__ = ViTForMaskedImageModeling(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ = model(__magic_name__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCamelCase ( self :Dict , __magic_name__ :int , __magic_name__ :str , __magic_name__ :Dict ) -> str:
'''simple docstring'''
a__ = self.type_sequence_label_size
a__ = ViTForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ = 1
a__ = ViTForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Union[str, Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case__ : str = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : List[Any] = True
snake_case__ : List[Any] = False
snake_case__ : Dict = False
snake_case__ : str = False
def _UpperCamelCase ( self :Tuple ) -> int:
'''simple docstring'''
a__ = ViTModelTester(self )
a__ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def _UpperCamelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _UpperCamelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
pass
def _UpperCamelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def _UpperCamelCase ( self :str ) -> List[Any]:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__magic_name__ )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _UpperCamelCase ( self :int ) -> Tuple:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _UpperCamelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def _UpperCamelCase ( self :int ) -> List[str]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def _UpperCamelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = ViTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __snake_case ( ) -> Any:
"""simple docstring"""
a__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(__magic_name__ )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
a__ = model(**__magic_name__ )
# verify the logits
a__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
a__ = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self :str ) -> int:
'''simple docstring'''
a__ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(__magic_name__ )
a__ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480 )
a__ = prepare_img()
a__ = image_processor(images=__magic_name__ , return_tensors='''pt''' )
a__ = inputs.pixel_values.to(__magic_name__ )
# forward pass
with torch.no_grad():
a__ = model(__magic_name__ , interpolate_pos_encoding=__magic_name__ )
# verify the logits
a__ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
a__ = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCamelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
a__ = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__magic_name__ , return_tensors='''pt''' )
a__ = inputs.pixel_values.to(__magic_name__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
a__ = model(__magic_name__ )
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
a__ = logging.get_logger()
# the current default level is logging.WARNING
a__ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__magic_name__ )
def _UpperCamelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ = logging.get_verbosity()
a__ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
a__ = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__magic_name__ ) as cl:
logger.warning(__magic_name__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__magic_name__ ) as cl:
logger.warning(__magic_name__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__magic_name__ ) as cl:
logger.warning(__magic_name__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(__magic_name__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def _UpperCamelCase ( self :int ) -> Tuple:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
a__ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
a__ = os.getenv('''TRANSFORMERS_VERBOSITY''' , __magic_name__ )
a__ = logging.log_levels[env_level_str]
a__ = logging.get_verbosity()
self.assertEqual(
__magic_name__ , __magic_name__ , F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
a__ = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def _UpperCamelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
a__ = logging.logging.getLogger()
with CaptureLogger(__magic_name__ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def _UpperCamelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
a__ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
a__ = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(__magic_name__ ) as cl:
logger.warning_advice(__magic_name__ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__magic_name__ ) as cl:
logger.warning_advice(__magic_name__ )
self.assertEqual(cl.out , msg + '''\n''' )
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
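
# A hedged sketch of the input this script expects: a pickled list of token-id
# sequences (the ids below are made up for illustration).
import pickle

data = [[101, 2023, 2003, 102], [101, 2178, 6251, 102]]
with open("data/dump.bert-base-uncased.pickle", "wb") as fp:
    pickle.dump(data, fp)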
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Generate a list of the prime numbers strictly below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the longest run of
    consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
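
# Small sanity check of the sieve above (primes strictly below the limit; not in
# the original file):
assert prime_sieve(50) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]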
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( A ):
'''simple docstring'''
lowercase_ : torch.FloatTensor
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case__ : Tuple=3 , snake_case__ : Dict=3 , snake_case__ : Dict=("DownEncoderBlock2D",) , snake_case__ : Optional[Any]=(64,) , snake_case__ : List[Any]=2 , snake_case__ : Any=32 , snake_case__ : Tuple="silu" , snake_case__ : Tuple=True , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Tuple = layers_per_block
UpperCAmelCase__ : Optional[int] = torch.nn.Convad(
snake_case__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Union[str, Any] = nn.ModuleList([] )
# down
UpperCAmelCase__ : Any = block_out_channels[0]
for i, down_block_type in enumerate(snake_case__ ):
UpperCAmelCase__ : List[str] = output_channel
UpperCAmelCase__ : Dict = block_out_channels[i]
UpperCAmelCase__ : Tuple = i == len(snake_case__ ) - 1
UpperCAmelCase__ : Dict = get_down_block(
snake_case__ , num_layers=self.layers_per_block , in_channels=snake_case__ , out_channels=snake_case__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , )
self.down_blocks.append(snake_case__ )
# mid
UpperCAmelCase__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# out
UpperCAmelCase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case__ , eps=1e-6 )
UpperCAmelCase__ : Union[str, Any] = nn.SiLU()
UpperCAmelCase__ : Dict = 2 * out_channels if double_z else out_channels
UpperCAmelCase__ : Union[str, Any] = nn.Convad(block_out_channels[-1] , snake_case__ , 3 , padding=1 )
UpperCAmelCase__ : Union[str, Any] = False
def UpperCamelCase ( self : int , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = x
UpperCAmelCase__ : Dict = self.conv_in(snake_case__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(snake_case__ : Dict ):
def custom_forward(*snake_case__ : List[str] ):
return module(*snake_case__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
UpperCAmelCase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case__ ) , snake_case__ , use_reentrant=snake_case__ )
# middle
UpperCAmelCase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case__ , use_reentrant=snake_case__ )
else:
for down_block in self.down_blocks:
UpperCAmelCase__ : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ )
# middle
UpperCAmelCase__ : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , snake_case__ )
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase__ : Dict = down_block(snake_case__ )
# middle
UpperCAmelCase__ : Optional[Any] = self.mid_block(snake_case__ )
# post-process
UpperCAmelCase__ : Tuple = self.conv_norm_out(snake_case__ )
UpperCAmelCase__ : List[Any] = self.conv_act(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = self.conv_out(snake_case__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , snake_case__ : int=3 , snake_case__ : str=3 , snake_case__ : Union[str, Any]=("UpDecoderBlock2D",) , snake_case__ : Dict=(64,) , snake_case__ : Optional[Any]=2 , snake_case__ : Dict=32 , snake_case__ : str="silu" , snake_case__ : Any="group" , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Any = layers_per_block
UpperCAmelCase__ : Any = nn.Convad(
snake_case__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = nn.ModuleList([] )
UpperCAmelCase__ : str = in_channels if norm_type == "spatial" else None
# mid
UpperCAmelCase__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# up
UpperCAmelCase__ : Tuple = list(reversed(snake_case__ ) )
UpperCAmelCase__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(snake_case__ ):
UpperCAmelCase__ : Dict = output_channel
UpperCAmelCase__ : List[Any] = reversed_block_out_channels[i]
UpperCAmelCase__ : List[Any] = i == len(snake_case__ ) - 1
UpperCAmelCase__ : Tuple = get_up_block(
snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , )
self.up_blocks.append(snake_case__ )
UpperCAmelCase__ : str = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase__ : Optional[Any] = SpatialNorm(block_out_channels[0] , snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1e-6 )
UpperCAmelCase__ : Dict = nn.SiLU()
UpperCAmelCase__ : Union[str, Any] = nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 )
UpperCAmelCase__ : Union[str, Any] = False
def UpperCamelCase ( self : List[str] , snake_case__ : List[str] , snake_case__ : Tuple=None ):
'''simple docstring'''
UpperCAmelCase__ : str = z
UpperCAmelCase__ : List[str] = self.conv_in(snake_case__ )
UpperCAmelCase__ : Tuple = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(snake_case__ : Dict ):
def custom_forward(*snake_case__ : List[Any] ):
return module(*snake_case__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
UpperCAmelCase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ )
UpperCAmelCase__ : List[Any] = sample.to(snake_case__ )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ )
else:
# middle
UpperCAmelCase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ )
UpperCAmelCase__ : int = sample.to(snake_case__ )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ )
else:
# middle
UpperCAmelCase__ : Union[str, Any] = self.mid_block(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = sample.to(snake_case__ )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ : int = up_block(snake_case__ , snake_case__ )
# post-process
if latent_embeds is None:
UpperCAmelCase__ : List[Any] = self.conv_norm_out(snake_case__ )
else:
UpperCAmelCase__ : Any = self.conv_norm_out(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[Any] = self.conv_act(snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.conv_out(snake_case__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , snake_case__ : str , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]="random" , snake_case__ : Any=False , snake_case__ : Any=True ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Any = n_e
UpperCAmelCase__ : str = vq_embed_dim
UpperCAmelCase__ : List[Any] = beta
UpperCAmelCase__ : List[Any] = legacy
UpperCAmelCase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCAmelCase__ : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
UpperCAmelCase__ : Optional[Any] = self.used.shape[0]
UpperCAmelCase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase__ : Union[str, Any] = self.re_embed
UpperCAmelCase__ : Optional[Any] = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
UpperCAmelCase__ : int = n_e
UpperCAmelCase__ : Dict = sane_index_shape
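    # The two helpers below translate between the full codebook index space and the
    # reduced index set stored in the `used` buffer: the first maps raw indices onto
    # positions in `used` (unknown indices become random or the `extra` index), and
    # the second inverts that mapping back to the original codebook indices.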
def UpperCamelCase ( self : Tuple , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = inds.shape
assert len(snake_case__ ) > 1
UpperCAmelCase__ : Tuple = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ : Optional[Any] = self.used.to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase__ : Optional[int] = match.argmax(-1 )
UpperCAmelCase__ : str = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCAmelCase__ : Union[str, Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCAmelCase__ : Tuple = self.unknown_index
return new.reshape(snake_case__ )
def UpperCamelCase ( self : int , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = inds.shape
assert len(snake_case__ ) > 1
UpperCAmelCase__ : List[Any] = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ : int = self.used.to(snake_case__ )
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase__ : Any = 0 # simply set to zero
UpperCAmelCase__ : List[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__ )
return back.reshape(snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCAmelCase__ : str = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase__ : Dict = torch.argmin(torch.cdist(snake_case__ , self.embedding.weight ) , dim=1 )
UpperCAmelCase__ : Any = self.embedding(snake_case__ ).view(z.shape )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Any = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase__ : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCAmelCase__ : Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCAmelCase__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCAmelCase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCAmelCase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCAmelCase__ : Optional[int] = self.remap_to_used(snake_case__ )
UpperCAmelCase__ : Optional[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCAmelCase__ : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCamelCase ( self : int , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
if self.remap is not None:
UpperCAmelCase__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCAmelCase__ : Dict = self.unmap_to_all(snake_case__ )
UpperCAmelCase__ : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCAmelCase__ : Optional[Any] = self.embedding(snake_case__ )
if shape is not None:
UpperCAmelCase__ : List[str] = z_q.view(snake_case__ )
# reshape back to match original input shape
UpperCAmelCase__ : List[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
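# A minimal, standalone sketch of the quantization step implemented above:
# nearest-codebook lookup via torch.cdist followed by the straight-through
# estimator. All names and sizes in this helper are illustrative only.
def _vq_lookup_demo():
    codebook = torch.randn(16 , 4 )  # n_e=16 entries of dim vq_embed_dim=4
    z = torch.randn(8 , 4 , requires_grad=True )  # flattened encoder latents
    idx = torch.argmin(torch.cdist(z , codebook ) , dim=1 )  # index of nearest entry
    z_q = codebook[idx]  # quantized latents, same shape as z
    return z + (z_q - z).detach()  # gradients flow straight through the lookup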
class UpperCAmelCase_ ( A ):
'''simple docstring'''
def __init__( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str]=False ):
'''simple docstring'''
UpperCAmelCase__ : Dict = parameters
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = torch.chunk(snake_case__ , 2 , dim=1 )
UpperCAmelCase__ : int = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCAmelCase__ : Optional[Any] = deterministic
UpperCAmelCase__ : Dict = torch.exp(0.5 * self.logvar )
UpperCAmelCase__ : List[str] = torch.exp(self.logvar )
if self.deterministic:
UpperCAmelCase__ : Dict = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[torch.Generator] = None ):
'''simple docstring'''
UpperCAmelCase__ : int = randn_tensor(
self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCAmelCase__ : List[str] = self.mean + self.std * sample
return x
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Any=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def UpperCamelCase ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Tuple=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
UpperCAmelCase__ : str = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case__ )
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return self.mean
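# The distribution above samples with the reparameterization trick
# (x = mean + std * eps) and implements the closed-form KL divergence between
# diagonal Gaussians; against the standard normal N(0, I) this reduces to
# 0.5 * sum(mean^2 + var - 1 - logvar). A standalone sketch with illustrative names:
def _diag_gaussian_demo():
    mean = torch.zeros(2 , 4 )
    logvar = torch.zeros(2 , 4 )
    std = torch.exp(0.5 * logvar )
    sample = mean + std * torch.randn_like(std )  # reparameterized sample
    kl = 0.5 * torch.sum(mean.pow(2 ) + logvar.exp() - 1.0 - logvar , dim=1 )
    return sample, kl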
| 199
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__lowerCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a__ :
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCAmelCase )} , )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowerCamelCase__: bool = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowerCamelCase__: str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCamelCase__: bool = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def UpperCAmelCase( self : List[str] ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class a__ :
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCamelCase__: Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """The input training data file (a text file)."""} )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
lowerCamelCase__: Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
lowerCamelCase__: bool = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowerCamelCase__: Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
lowerCamelCase__: Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
lowerCamelCase__: Optional[int] = field(
default=_UpperCAmelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowerCamelCase__: float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowerCamelCase__: bool = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def UpperCAmelCase( self : Tuple ):
if self.train_file is not None:
a_ : Optional[int] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
a_ : List[str] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _a ( __UpperCamelCase , __UpperCamelCase ):
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" ) as f:
a_ : Dict = [json.loads(SCREAMING_SNAKE_CASE__ ) for line in f.read().splitlines() if (len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace())]
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = {c: dataset[c] for c in dataset.column_names}
a_ : Union[str, Any] = refs
return Dataset.from_dict(SCREAMING_SNAKE_CASE__ )
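# Each line of a whole-word-masking reference file is expected to hold a JSON
# list of sub-token positions that belong to the same Chinese word, e.g. a
# (hypothetical) line "[2, 3, 5]"; these positions let the collator mask whole
# words instead of isolated sub-tokens.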
def _a ( ):
a_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
a_ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a_ : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
a_ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
a_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
a_ : int = {}
if data_args.train_file is not None:
a_ : Any = data_args.train_file
if data_args.validation_file is not None:
a_ : str = data_args.validation_file
a_ : Optional[int] = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
a_ : Union[str, Any] = """text"""
a_ : Dict = load_dataset(SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Any = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
a_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
a_ : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
a_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
a_ : Any = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
a_ : Dict = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
a_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
a_ : List[str] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
a_ : Optional[Any] = AutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE__ )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
a_ : List[Any] = datasets["""train"""].column_names
else:
a_ : Tuple = datasets["""validation"""].column_names
a_ : Union[str, Any] = """text""" if """text""" in column_names else column_names[0]
a_ : Optional[Any] = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(__UpperCamelCase ):
# Remove empty lines
a_ : List[Any] = [line for line in examples["""text"""] if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=data_args.max_seq_length )
a_ : Any = datasets.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
a_ : List[Any] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
a_ : Any = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing them
a_ : str = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
a_ : List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
a_ : List[Any] = DataCollatorForWholeWordMask(tokenizer=SCREAMING_SNAKE_CASE__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
a_ : str = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
a_ : Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
a_ : str = model_args.model_name_or_path
else:
a_ : Tuple = None
a_ : int = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model() # Saves the tokenizer too for easy upload
a_ : Optional[Any] = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
a_ : Tuple = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
a_ : List[Any] = trainer.evaluate()
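        # Perplexity is the exponential of the average cross-entropy loss, the
        # standard evaluation metric for (masked) language models.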
a_ : Optional[Any] = math.exp(eval_output["""eval_loss"""] )
a_ : List[str] = perplexity
a_ : List[Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def _a ( __UpperCamelCase ):
main()
if __name__ == "__main__":
main()
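# Typical invocation (all paths and values below are hypothetical):
#   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file train_ref.txt \
#       --do_train --do_eval --output_dir ./mlm-wwm-out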
| 704
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCamelCase = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class a__ ( unittest.TestCase ):
def UpperCAmelCase( self : Dict ):
a_ : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
a_ : Optional[Any] = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase_ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def UpperCAmelCase( self : Dict ):
a_ : Any = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple=None ):
a_ : Any = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
a_ : Any = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        a_ : Dict = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
a_ : Optional[Any] = black.format_str(lowerCamelCase_ , mode=lowerCamelCase_ )
a_ : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(lowerCamelCase_ , """w""" , newline="""\n""" ) as f:
f.write(lowerCamelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase_ )
with open(lowerCamelCase_ , """r""" ) as f:
self.assertTrue(f.read() , lowerCamelCase_ )
def UpperCAmelCase( self : Dict ):
a_ : Tuple = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : Optional[int] ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , lowerCamelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , lowerCamelCase_ ) , )
# Copy consistency with a really long name
a_ : int = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , lowerCamelCase_ , lowerCamelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , lowerCamelCase_ , overwrite_result=re.sub("""Bert""" , """TestModel""" , lowerCamelCase_ ) , )
def UpperCAmelCase( self : int ):
a_ : Optional[Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
a_ : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
a_ : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
a_ : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
a_ , a_ : Optional[int] = check_copies.convert_to_localized_md(
lowerCamelCase_ , lowerCamelCase_ , localized_readme["""format_model_list"""] )
self.assertFalse(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
a_ , a_ : Tuple = check_copies.convert_to_localized_md(
lowerCamelCase_ , lowerCamelCase_ , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCamelCase_ )
a_ : Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
a_ : Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
a_ : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
a_ , a_ : Tuple = check_copies.convert_to_localized_md(
lowerCamelCase_ , lowerCamelCase_ , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
| 478
| 0
|
_A = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
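# The doc-building pipeline later substitutes these placeholders in rendered
# code samples; mapping them to syntactically valid Fake* names is (presumably)
# what lets the formatter parse snippets that still contain template variables
# such as {model_class}.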
| 258
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple ="""unispeech-sat"""
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(512, 512, 512, 512, 512, 512, 512) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=128 , lowerCAmelCase__=16 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=0 , lowerCAmelCase__=320 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=100 , lowerCAmelCase__=256 , lowerCAmelCase__=256 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=256 , lowerCAmelCase__=(512, 512, 512, 512, 1500) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=512 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=504 , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
a : Any = hidden_size
a : Tuple = feat_extract_norm
a : Tuple = feat_extract_activation
a : Dict = list(lowerCAmelCase__ )
a : int = list(lowerCAmelCase__ )
a : Optional[Any] = list(lowerCAmelCase__ )
a : int = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[int] = len(self.conv_dim )
a : int = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : List[Any] = num_attention_heads
a : Any = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Tuple = activation_dropout
a : Dict = feat_proj_dropout
a : Optional[Any] = final_dropout
a : Union[str, Any] = layerdrop
a : str = layer_norm_eps
a : Optional[int] = initializer_range
a : Optional[int] = vocab_size
a : str = num_clusters
a : Any = do_stable_layer_norm
a : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[Any] = apply_spec_augment
a : int = mask_time_prob
a : Optional[int] = mask_time_length
a : Dict = mask_time_min_masks
a : Optional[int] = mask_feature_prob
a : List[str] = mask_feature_length
a : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a : List[str] = num_codevectors_per_group
a : List[Any] = num_codevector_groups
a : Tuple = contrastive_logits_temperature
a : int = feat_quantizer_dropout
a : Optional[Any] = num_negatives
a : Optional[int] = codevector_dim
a : Tuple = proj_codevector_dim
a : Optional[int] = diversity_loss_weight
# ctc loss
a : Dict = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : Union[str, Any] = list(lowerCAmelCase__ )
a : List[str] = list(lowerCAmelCase__ )
a : Tuple = list(lowerCAmelCase__ )
a : Optional[int] = xvector_output_dim
@property
def __a ( self ) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
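# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above
# evaluates to 5 * 2**6 = 320: the number of raw audio samples that collapse
# into a single frame of extracted features.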
| 633
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Any ="pegasus"
lowerCamelCase__ : Dict =["past_key_values"]
lowerCamelCase__ : str ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=1024 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0_2 , lowerCamelCase=0 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=1 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : List[str] = vocab_size
__magic_name__ : int = max_position_embeddings
__magic_name__ : Optional[Any] = d_model
__magic_name__ : Dict = encoder_ffn_dim
__magic_name__ : int = encoder_layers
__magic_name__ : Tuple = encoder_attention_heads
__magic_name__ : int = decoder_ffn_dim
__magic_name__ : Any = decoder_layers
__magic_name__ : Tuple = decoder_attention_heads
__magic_name__ : List[Any] = dropout
__magic_name__ : str = attention_dropout
__magic_name__ : Any = activation_dropout
__magic_name__ : Optional[int] = activation_function
__magic_name__ : Any = init_std
__magic_name__ : Tuple = encoder_layerdrop
__magic_name__ : Tuple = decoder_layerdrop
__magic_name__ : Union[str, Any] = use_cache
__magic_name__ : Tuple = encoder_layers
__magic_name__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
@property
def lowercase ( self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase ( self ) -> int:
"""simple docstring"""
return self.d_model
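# The `attribute_map` declared above lets generic code read
# `config.num_attention_heads` and `config.hidden_size` even though Pegasus
# stores them as `encoder_attention_heads` and `d_model`; the two properties
# expose the same mapping explicitly.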
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''CLIPFeatureExtractor''']
lowercase_ = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
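# With the _LazyModule pattern above, a statement such as
# `from transformers.models.clip import CLIPModel` only resolves the symbol on
# first attribute access, so the heavy torch/tf/flax backends are imported
# solely when a class from the corresponding block is actually used.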
| 336
| 0
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 16
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer("This is me" , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
            SCREAMING_SNAKE_CASE = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
        SCREAMING_SNAKE_CASE = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase )
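# Saving is intentionally blocked while the model is in BetterTransformer form,
# so the test above asserts that `save_pretrained` raises until
# `reverse_bettertransformer()` has restored the original module layout.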
| 16
| 1
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    test_pruning = False
    test_resize_embeddings = False
    test_model_parallel = True
    is_encoder_decoder = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
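# --- Illustrative usage (editor's addition) ---
# A hedged sketch of plain UMT5 inference matching the integration test above;
# it assumes network access to the `google/umt5-small` checkpoint.
def umt5_generate_sketch(prompt: str = "A <extra_id_0> walks into a bar.") -> str:
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
    model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    generated = model.generate(input_ids, max_new_tokens=20)
    return tokenizer.batch_decode(generated)[0]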
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
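# --- Illustrative usage (editor's addition) ---
# A small sketch of the `feed_forward_proj` parsing implemented above: a plain
# activation keeps `is_gated_act=False`, while the `gated-` prefix enables the
# gated variant (and "gated-gelu" is remapped to "gelu_new" for compatibility).
def t5_config_sketch():
    relu_config = T5Config(feed_forward_proj="relu")
    assert relu_config.dense_act_fn == "relu" and not relu_config.is_gated_act

    gated_config = T5Config(feed_forward_proj="gated-gelu")
    assert gated_config.dense_act_fn == "gelu_new" and gated_config.is_gated_act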
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
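# --- Illustrative usage (editor's addition) ---
# A hedged sketch of calling the pipeline above through `transformers.pipeline`;
# assumes an OWL-ViT style checkpoint and a local image path.
def zero_shot_detection_sketch(image_path: str = "cats.png"):
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    # Returns a list of {"score", "label", "box"} dicts sorted by score.
    return detector(image_path, candidate_labels=["cat", "remote control"], threshold=0.1)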
"""simple docstring"""
import string
def _UpperCamelCase ( A ):
UpperCamelCase_ =""
for i in sequence:
UpperCamelCase_ =ord(A )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def _UpperCamelCase ( A ):
UpperCamelCase_ =string.ascii_letters
UpperCamelCase_ =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(A )] if c in letters else c for c in sequence )
def _UpperCamelCase ( ):
from timeit import timeit
print("Running performance benchmarks..." )
UpperCamelCase_ ="from string import printable ; from __main__ import atbash, atbash_slow"
print(f"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=A )} seconds""" )
print(f"""> atbash(): {timeit("atbash(printable)" , setup=A )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
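# --- Illustrative behavior (editor's addition) ---
# The Atbash cipher is an involution: applying it twice returns the input.
# A quick self-check of both implementations above:
#
#     assert atbash("ABCDEFGH") == "ZYXWVUTS"
#     assert atbash(atbash("with space")) == "with space"
#     assert atbash_slow("testStringtest") == atbash("testStringtest")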
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
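# --- Illustrative usage (editor's addition) ---
# A minimal sketch of the special-token layout produced by the methods above;
# assumes the `roberta-base` files are reachable.
def roberta_fast_sketch():
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    # Single sequence: <s> ... </s> ; a pair is joined as <s> A </s></s> B </s>
    ids = tokenizer.build_inputs_with_special_tokens([100, 200])
    assert ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.eos_token_id
    return ids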
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json")
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
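# --- Illustrative behavior (editor's addition) ---
# What the helpers under test return, per the assertions above:
#
#     find_backend("    if not is_torch_available():")  -> "torch"
#     create_dummy_object("CONSTANT", "'torch'")        -> "\nCONSTANT = None\n"
#
# `create_dummy_files` then stitches such objects into one placeholder module
# per backend so imports still resolve when a backend is missing.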
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compares a library version to some requirement using a given operation (e.g. ">", "==")."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compares the currently installed version of torch to a given reference."""
    return compare_versions(torch_version, operation, version)
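# --- Illustrative usage (editor's addition) ---
# A quick sketch of the helpers above; assumes `torch` is installed so its
# version metadata can be resolved.
#
#     compare_versions("torch", ">=", "1.12.0")  # True on torch >= 1.12
#     is_torch_version("<", "3.0.0")             # compares the cached torch_version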
from __future__ import annotations


def all_unique(collection: list) -> bool:
    # Return True when every element of the input appears exactly once.
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
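# --- Illustrative usage (editor's addition) ---
#
#     all_unique([1, 2, 3])     # True
#     all_unique([1, 2, 2, 3])  # False: the set collapses the duplicate 2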
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : str = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    # Output class for the scheduler's step functions.
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    # The variance exploding stochastic differential equation (SDE) scheduler.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
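# --- Illustrative usage (editor's addition) ---
# A hedged sketch of the predictor-corrector loop this scheduler is designed
# for; `model` is a hypothetical score network with the diffusers UNet call
# signature, and shapes/steps are illustrative.
def sde_ve_sampling_sketch(model, batch_shape=(1, 3, 32, 32), num_inference_steps=100):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)

    sample = torch.randn(batch_shape) * scheduler.init_noise_sigma
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(batch_shape[0])
        # corrector step(s), then predictor step
        for _ in range(scheduler.config.correct_steps):
            score = model(sample, sigma_t).sample
            sample = scheduler.step_correct(score, sample).prev_sample
        score = model(sample, sigma_t).sample
        sample = scheduler.step_pred(score, t, sample).prev_sample
    return sample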
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
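# --- Illustrative usage (editor's addition) ---
# A hedged sketch of the processor above; assumes a LayoutXLM checkpoint, a PIL
# document image, and that the image processor's built-in OCR is enabled.
def layoutxlm_processor_sketch(image):
    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    encoding = processor(image, return_tensors="pt")  # OCR supplies words and boxes
    # -> BatchEncoding with input_ids, bbox, attention_mask and image tensors
    return encoding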
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
__a = input('Enter numbers separated by comma:\n').strip()
__a = sorted(int(item) for item in user_input.split(','))
__a = int(input('Enter a single number to be found in the list:\n'))
__a = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.')
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    # Deal with dynamic shape in tensorflow cleanly.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # A tiny epsilon keeps the XLA-compiled CPU softmax numerically well-behaved.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon)
    return outputs
def __UpperCAmelCase ( A : Dict , A : int=0 , A : Optional[int]=-1 ) -> str:
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
UpperCAmelCase_ : Any = tf.shape(A )
UpperCAmelCase_ : Dict = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
UpperCAmelCase_ : Optional[int] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(A , A )
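# Illustrative shapes (assuming an input tensor of shape (2, 3, 4)):
#   flatten(x)                         -> shape (24,)
#   flatten(x, start_dim=1)            -> shape (2, 12)
#   flatten(x, start_dim=1, end_dim=1) -> x unchanged (start_dim == end_dim)
# matching the semantics of torch.flatten.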
def __UpperCAmelCase ( A : tf.Tensor ) -> tf.Tensor:
if not isinstance(A , tf.Tensor ):
UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor(A ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
UpperCAmelCase_ : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
UpperCAmelCase_ : Tuple = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
UpperCAmelCase_ : int = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
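# The returned mask is additive: positions that may be attended to map to 0
# and masked positions map to the dtype's most negative value, so adding it
# to raw attention scores drives masked positions to ~0 after the softmax.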
def __UpperCAmelCase ( A : tf.Tensor , A : int , A : str = "input_ids" ) -> None:
tf.debugging.assert_less(
A , tf.cast(A , dtype=tensor.dtype ) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(A )}) must be smaller than the embedding "
F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def __UpperCAmelCase ( A : Dict , A : Tuple , A : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = 6_4_5_1_2
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase_ : Tuple = [x for x in data if len(A ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}" )
UpperCAmelCase_ : Union[str, Any] = np.asarray(A )
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : str = np.array_split(A , A )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase_ : Dict = np.array_split(A , A )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(A ):
UpperCAmelCase_ : int = chunk_data
else:
UpperCAmelCase_ : List[Any] = data
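# Illustration of the chunking scheme: an attribute list that exceeds the
# HDF5 object-header limit is stored as attrs named "name0", "name1", ...;
# the loader below reassembles the pieces by probing those suffixes in order.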
def __UpperCAmelCase ( A : int , A : Optional[Any] ) -> Tuple:
if name in group.attrs:
UpperCAmelCase_ : Optional[int] = [n.decode('''utf8''' ) if hasattr(A , '''decode''' ) else n for n in group.attrs[name]]
else:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(A , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __UpperCAmelCase ( A : Tuple ) -> str:
def _expand_single_ad_tensor(A : int ):
if isinstance(A , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(A , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , A )
| 541
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_UpperCamelCase : Union[str, Any] = 'examples/'
_UpperCamelCase : List[str] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
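# Example of how a (pattern, template) pair is applied (illustrative): with
# version "0.19.0", the "init" entry rewrites a line such as
#   __version__ = "0.18.0.dev0"
# into
#   __version__ = "0.19.0"
# once "VERSION" in the template has been substituted.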
_UpperCamelCase : Optional[Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
_UpperCamelCase : Any = 'README.md'
def __UpperCAmelCase ( A : Optional[int] , A : List[Any] , A : Tuple ) -> Dict:
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ : int = f.read()
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = REPLACE_PATTERNS[pattern]
UpperCAmelCase_ : Tuple = replace.replace('''VERSION''' , A )
UpperCAmelCase_ : Tuple = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def __UpperCAmelCase ( A : List[str] ) -> Any:
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def __UpperCAmelCase ( A : Tuple , A : Optional[int]=False ) -> Tuple:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : List[str] = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase_ : int = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ : Optional[int] = f.readlines()
# Find the start of the list.
UpperCAmelCase_ : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase_ : Any = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase_ : Optional[Any] = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def __UpperCAmelCase ( ) -> Dict:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase_ : Any = f.read()
UpperCAmelCase_ : Union[str, Any] = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def __UpperCAmelCase ( A : str=False ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase_ : Optional[int] = default_version.base_version
elif patch:
UpperCAmelCase_ : Any = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase_ : Optional[Any] = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase_ : List[str] = input(F"Which version are you releasing? [{default_version}]" )
if len(A ) == 0:
UpperCAmelCase_ : Tuple = default_version
print(F"Updating version to {version}." )
global_version_update(A , patch=A )
def __UpperCAmelCase ( ) -> Optional[int]:
UpperCAmelCase_ : str = get_version()
UpperCAmelCase_ : Tuple = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase_ : Any = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase_ : List[Any] = input(F"Which version are we developing now? [{dev_version}]" )
if len(A ) == 0:
UpperCAmelCase_ : Optional[int] = dev_version
print(F"Updating version to {version}." )
global_version_update(A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 541
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class A_ ( __lowercase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = "xlm-roberta-xl"
def __init__( self , _A=250880 , _A=2560 , _A=36 , _A=32 , _A=10240 , _A="gelu" , _A=0.1 , _A=0.1 , _A=514 , _A=1 , _A=0.02 , _A=1e-05 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , _A=None , **_A , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Any = hidden_dropout_prob
_UpperCAmelCase : int = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : str = layer_norm_eps
_UpperCAmelCase : Any = position_embedding_type
_UpperCAmelCase : int = use_cache
_UpperCAmelCase : int = classifier_dropout
class A_ ( __lowercase ):
'''simple docstring'''
@property
def snake_case__ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
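# The integer keys above index each input's axes for the ONNX exporter:
# axis 0 is the batch dimension and axis 1 (axes 1 and 2 for multiple choice)
# the choice/sequence dimensions; naming them marks those axes as dynamic at
# export time.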
| 713
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
SCREAMING_SNAKE_CASE = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class A_ ( unittest.TestCase , __lowercase ):
'''simple docstring'''
def snake_case__ ( self) -> int:
"""simple docstring"""
_UpperCAmelCase : int = load_tool('''text-question-answering''')
self.tool.setup()
_UpperCAmelCase : Any = load_tool('''text-question-answering''' , remote=_A)
def snake_case__ ( self) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.tool(_A , '''What did Hugging Face do in April 2021?''')
self.assertEqual(_A , '''launched the BigScience Research Workshop''')
def snake_case__ ( self) -> str:
"""simple docstring"""
_UpperCAmelCase : str = self.remote_tool(_A , '''What did Hugging Face do in April 2021?''')
self.assertEqual(_A , '''launched the BigScience Research Workshop''')
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = self.tool(text=_A , question='''What did Hugging Face do in April 2021?''')
self.assertEqual(_A , '''launched the BigScience Research Workshop''')
def snake_case__ ( self) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.remote_tool(text=_A , question='''What did Hugging Face do in April 2021?''')
self.assertEqual(_A , '''launched the BigScience Research Workshop''')
| 186
| 0
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCAmelCase = logging.getLogger(__name__)
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = '''sequence-classification'''
def __init__( self ,__UpperCAmelCase ) -> List[str]:
if type(_lowerCAmelCase ) == dict:
lowerCAmelCase__ : Optional[int] = Namespace(**_lowerCAmelCase )
lowerCAmelCase__ : int = glue_output_modes[hparams.task]
lowerCAmelCase__ : Optional[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(_lowerCAmelCase ,_lowerCAmelCase ,self.mode )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> Optional[int]:
return self.model(**_lowerCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase__ : str = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase__ : Optional[int] = self(**_lowerCAmelCase )
lowerCAmelCase__ : int = outputs[0]
lowerCAmelCase__ : Optional[int] = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase__ : List[str] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : List[Any] = self.hparams
lowerCAmelCase__ : str = processors[args.task]()
lowerCAmelCase__ : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase__ : str = self._feature_file(_lowerCAmelCase )
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,_lowerCAmelCase )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
lowerCAmelCase__ : int = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
lowerCAmelCase__ : List[str] = convert_examples_to_features(
_lowerCAmelCase ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,_lowerCAmelCase )
torch.save(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> DataLoader:
lowerCAmelCase__ : str = """dev""" if mode == """test""" else mode
lowerCAmelCase__ : Optional[Any] = self._feature_file(_lowerCAmelCase )
logger.info("""Loading features from cached file %s""" ,_lowerCAmelCase )
lowerCAmelCase__ : List[Any] = torch.load(_lowerCAmelCase )
lowerCAmelCase__ : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
lowerCAmelCase__ : Optional[Any] = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase__ : List[Any] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase__ : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) ,batch_size=_lowerCAmelCase ,shuffle=_lowerCAmelCase ,)
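    # Note: features are tokenized and cached once per split by the
    # data-preparation hook above; this dataloader only reloads the cached
    # tensors, so constructing it repeatedly does not re-tokenize the dataset.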
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase__ : Dict = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase__ : List[str] = self(**_lowerCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = outputs[:2]
lowerCAmelCase__ : List[str] = logits.detach().cpu().numpy()
lowerCAmelCase__ : Tuple = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> tuple:
lowerCAmelCase__ : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
lowerCAmelCase__ : Dict = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase__ : Any = np.argmax(_lowerCAmelCase ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase__ : Union[str, Any] = np.squeeze(_lowerCAmelCase )
lowerCAmelCase__ : Tuple = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
lowerCAmelCase__ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : Tuple = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,_lowerCAmelCase ,_lowerCAmelCase )}
lowerCAmelCase__ : Union[str, Any] = dict(results.items() )
lowerCAmelCase__ : Tuple = results
return ret, preds_list, out_label_list
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self._eval_end(_lowerCAmelCase )
lowerCAmelCase__ : int = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._eval_end(_lowerCAmelCase )
lowerCAmelCase__ : Any = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCAmelCase_ ( __UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(_lowerCAmelCase ,_lowerCAmelCase )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=_lowerCAmelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=_lowerCAmelCase ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase_ , os.getcwd() )
lowerCAmelCase__ : Any = GLUETransformer.add_model_specific_args(lowerCAmelCase_ , os.getcwd() )
lowerCAmelCase__ : Optional[int] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase__ : int = os.path.join(
"""./results""" , f"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
os.makedirs(args.output_dir )
lowerCAmelCase__ : List[str] = GLUETransformer(lowerCAmelCase_ )
lowerCAmelCase__ : Optional[Any] = generic_train(lowerCAmelCase_ , lowerCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase__ : Dict = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowerCAmelCase_ ) )
lowerCAmelCase__ : List[str] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 565
|
from __future__ import annotations
def _lowerCAmelCase ( lowerCAmelCase_ :int , lowerCAmelCase_ :int )->list[str]:
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(lowerCAmelCase_ ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
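# Worked example (illustrative): 16 bytes split into 4 partitions yields
# ['1-4', '5-8', '9-12', '13-16']; the final partition absorbs any remainder
# when number_of_bytes is not evenly divisible by partitions.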
| 283
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_lowercase : Union[str, Any] = 192
_lowercase : str = 768
_lowercase : str = 12
_lowercase : int = 3
_lowercase : Tuple = [800, 1_333]
_lowercase : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
_lowercase : List[str] = 330
_lowercase : Tuple = 14
_lowercase : Any = 6
_lowercase : Optional[Any] = 1_320
elif "yolos_s" in yolos_name:
_lowercase : Optional[int] = 384
_lowercase : int = 1_536
_lowercase : Optional[int] = 12
_lowercase : Dict = 6
elif "yolos_b" in yolos_name:
_lowercase : Tuple = [800, 1_344]
_lowercase : Any = 91
_lowercase : int = 'huggingface/label-files'
_lowercase : int = 'coco-detection-id2label.json'
_lowercase : int = json.load(open(hf_hub_download(__UpperCAmelCase ,__UpperCAmelCase ,repo_type='dataset' ) ,'r' ) )
_lowercase : int = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowercase : Optional[int] = idalabel
_lowercase : List[str] = {v: k for k, v in idalabel.items()}
return config
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase : int = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_lowercase : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Dict = in_proj_weight[: config.hidden_size, :]
_lowercase : Any = in_proj_bias[: config.hidden_size]
_lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase : Any = in_proj_weight[-config.hidden_size :, :]
_lowercase : int = in_proj_bias[-config.hidden_size :]
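# The timm checkpoint fuses attention into a single qkv projection of shape
# (3 * hidden_size, hidden_size); the slices above peel off the query, key,
# and value parameters as the first, middle, and last thirds respectively.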
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
if "backbone" in name:
_lowercase : Any = name.replace('backbone' ,'vit' )
if "cls_token" in name:
_lowercase : int = name.replace('cls_token' ,'embeddings.cls_token' )
if "det_token" in name:
_lowercase : int = name.replace('det_token' ,'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
_lowercase : Dict = name.replace('mid_pos_embed' ,'encoder.mid_position_embeddings' )
if "pos_embed" in name:
_lowercase : List[Any] = name.replace('pos_embed' ,'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "blocks" in name:
_lowercase : Optional[Any] = name.replace('blocks' ,'encoder.layer' )
if "attn.proj" in name:
_lowercase : Optional[Any] = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name:
_lowercase : str = name.replace('attn' ,'attention.self' )
if "norm1" in name:
_lowercase : Any = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
_lowercase : Optional[Any] = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : str = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : Any = name.replace('mlp.fc2' ,'output.dense' )
if "class_embed" in name:
_lowercase : List[str] = name.replace('class_embed' ,'class_labels_classifier' )
if "bbox_embed" in name:
_lowercase : Any = name.replace('bbox_embed' ,'bbox_predictor' )
if "vit.norm" in name:
_lowercase : List[str] = name.replace('vit.norm' ,'vit.layernorm' )
return name
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowercase : str = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowercase : Any = key.split('.' )
_lowercase : int = int(key_split[2] )
_lowercase : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_lowercase : Tuple = val[:dim, :]
_lowercase : Dict = val[
dim : dim * 2, :
]
_lowercase : Optional[int] = val[-dim:, :]
else:
_lowercase : Dict = val[:dim]
_lowercase : Optional[Any] = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
else:
_lowercase : int = val
return orig_state_dict
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Any = Image.open(requests.get(__UpperCAmelCase ,stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = False ):
"""simple docstring"""
_lowercase : Optional[Any] = get_yolos_config(__UpperCAmelCase )
# load original state_dict
_lowercase : List[Any] = torch.load(__UpperCAmelCase ,map_location='cpu' )['model']
# load 🤗 model
_lowercase : str = YolosForObjectDetection(__UpperCAmelCase )
model.eval()
_lowercase : Any = convert_state_dict(__UpperCAmelCase ,__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by YolosImageProcessor
_lowercase : Optional[int] = 800 if yolos_name != 'yolos_ti' else 512
_lowercase : str = YolosImageProcessor(format='coco_detection' ,size=__UpperCAmelCase )
_lowercase : Optional[int] = image_processor(images=prepare_img() ,return_tensors='pt' )
_lowercase : str = model(**__UpperCAmelCase )
_lowercase , _lowercase : int = outputs.logits, outputs.pred_boxes
_lowercase , _lowercase : Any = None, None
if yolos_name == "yolos_ti":
_lowercase : Optional[int] = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
_lowercase : Dict = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
_lowercase : Optional[Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
_lowercase : Tuple = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
_lowercase : Optional[Any] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
_lowercase : Optional[Any] = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
_lowercase : int = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
_lowercase : Dict = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
_lowercase : Dict = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
_lowercase : str = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] ,__UpperCAmelCase ,atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] ,__UpperCAmelCase ,atol=1E-4 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
_lowercase : Tuple = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
_lowercase : str = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCAmelCase ,organization='hustvl' )
model.push_to_hub(__UpperCAmelCase ,organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 283
|
"""simple docstring"""
from __future__ import annotations
class _lowerCamelCase :
def __init__( self : Any , lowerCamelCase_ : list[list[int]] ):
"""simple docstring"""
_lowercase : Tuple = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(lowerCamelCase_ ) != 0:
_lowercase : Dict = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase_ ) != cols:
raise error
for value in row:
if not isinstance(lowerCamelCase_ , (int, float) ):
raise error
_lowercase : int = rows
else:
_lowercase : Optional[Any] = []
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return len(self.rows )
@property
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return len(self.rows[0] )
@property
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return self.order[0] == self.order[1]
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
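    # The recursive branch is a Laplace (cofactor) expansion along the first
    # row: det(A) = sum_j a_0j * C_0j. Exact, but O(n!) in the matrix order,
    # so it is only practical for the small matrices this class targets.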
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return bool(self.determinant() )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
_lowercase : List[str] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase_ ).determinant()
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase_ , lowerCamelCase_ )
return -1 * self.get_minor(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(lowerCamelCase_ , lowerCamelCase_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : str = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Dict = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[str] ):
"""simple docstring"""
return str(self.rows )
def __str__( self : str ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : list[int] , lowerCamelCase_ : int | None = None ):
"""simple docstring"""
_lowercase : Union[str, Any] = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise type_error
for value in row:
if not isinstance(lowerCamelCase_ , (int, float) ):
raise type_error
if len(lowerCamelCase_ ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(lowerCamelCase_ )
else:
_lowercase : Any = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCAmelCase ( self : str , lowerCamelCase_ : list[int] , lowerCamelCase_ : int | None = None ):
"""simple docstring"""
_lowercase : Tuple = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise type_error
for value in column:
if not isinstance(lowerCamelCase_ , (int, float) ):
raise type_error
if len(lowerCamelCase_ ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
_lowercase : List[Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
_lowercase : Any = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Union[str, Any] , lowerCamelCase_ : object ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , lowerCamelCase_ : object ):
"""simple docstring"""
return not self == other
def __neg__( self : int ):
"""simple docstring"""
return self * -1
def __add__( self : Optional[int] , lowerCamelCase_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : str , lowerCamelCase_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Optional[Any] , lowerCamelCase_ : Matrix | int | float ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase_ , lowerCamelCase_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self : Optional[Any] , lowerCamelCase_ : int ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
_lowercase : str = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def __UpperCAmelCase ( cls : str , lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 283
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Any = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : int = """config.json"""
lowerCamelCase_ : Dict = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : int = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Union[str, Any] = """model.onnx"""
lowerCamelCase_ : int = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Any = """weights.pb"""
lowerCamelCase_ : Tuple = """https://huggingface.co"""
lowerCamelCase_ : Dict = default_cache_path
lowerCamelCase_ : List[Any] = """diffusers_modules"""
lowerCamelCase_ : str = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : int = """.self_attn"""
| 559
|
from math import pow, sqrt
def lowerCAmelCase( *__lowerCamelCase ):
__a = len(__lowerCamelCase ) > 0 and all(value > 0.0 for value in values )
return result
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
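# All of the helpers above are algebraic rearrangements of Graham's law of
# effusion, rate_1 / rate_2 = sqrt(M_2 / M_1), solved for whichever rate or
# molar mass is the unknown (results rounded to 6 decimal places).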
| 559
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __magic_name__( _A ):
'''simple docstring'''
UpperCamelCase__ = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase__ = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCamelCase__ = 4
UpperCamelCase__ = 48
UpperCamelCase__ = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase__ = [6, 6, 6, 6]
UpperCamelCase__ = 60
UpperCamelCase__ = [6, 6, 6, 6]
UpperCamelCase__ = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase__ = 4
UpperCamelCase__ = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCamelCase__ = 1
UpperCamelCase__ = 1
UpperCamelCase__ = 126
UpperCamelCase__ = 7
UpperCamelCase__ = 2_5_5.0
UpperCamelCase__ = """"""
return config
def __magic_name__( _A , _A ):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
UpperCamelCase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
UpperCamelCase__ = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
UpperCamelCase__ = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
UpperCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCamelCase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
UpperCamelCase__ = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
UpperCamelCase__ = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
UpperCamelCase__ = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
UpperCamelCase__ = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
UpperCamelCase__ = """layernorm.weight"""
if name == "norm.bias":
UpperCamelCase__ = """layernorm.bias"""
if "conv_first" in name:
UpperCamelCase__ = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCamelCase__ = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCamelCase__ = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
UpperCamelCase__ = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
UpperCamelCase__ = name.replace("""upsample.2""" , """upsample.convolution_1""" )
UpperCamelCase__ = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
UpperCamelCase__ = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
UpperCamelCase__ = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
UpperCamelCase__ = """swin2sr.""" + name
return name
def __magic_name__( _A , _A ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(_A )
if "qkv" in key:
UpperCamelCase__ = key.split(""".""" )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[4] )
UpperCamelCase__ = config.embed_dim
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[dim : dim * 2, :]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[:dim]
UpperCamelCase__ = val[dim : dim * 2]
UpperCamelCase__ = val[-dim:]
else:
UpperCamelCase__ = val
return orig_state_dict
def __magic_name__( _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = get_config(_A )
UpperCamelCase__ = SwinaSRForImageSuperResolution(_A )
model.eval()
UpperCamelCase__ = torch.hub.load_state_dict_from_url(_A , map_location="""cpu""" )
UpperCamelCase__ = convert_state_dict(_A , _A )
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(_A , strict=_A )
if len(_A ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(_A ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
UpperCamelCase__ = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
UpperCamelCase__ = Image.open(requests.get(_A , stream=_A ).raw ).convert("""RGB""" )
UpperCamelCase__ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCamelCase__ = 126 if """Jpeg""" in checkpoint_url else 256
UpperCamelCase__ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
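    # mean/std above are the standard ImageNet normalization statistics
    # (mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]).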
UpperCamelCase__ = transforms(_A ).unsqueeze(0 )
if config.num_channels == 1:
UpperCamelCase__ = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCamelCase__ = model(_A )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCamelCase__ = torch.Size([1, 3, 512, 512] )
UpperCamelCase__ = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase__ = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase__ = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCamelCase__ = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase__ = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase__ = torch.Size([1, 3, 512, 512] )
UpperCamelCase__ = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase__ = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase__ = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _A , atol=1e-3 )
print("""Looks ok!""" )
UpperCamelCase__ = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
UpperCamelCase__ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_A )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 265
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : List[str] , lowercase : List[Any]=1_3 , lowercase : Union[str, Any]=7 , lowercase : Dict=True , lowercase : Optional[int]=True , lowercase : List[Any]=True , lowercase : Dict=True , lowercase : List[str]=9_9 , lowercase : Dict=1_6 , lowercase : Dict=3_6 , lowercase : str=6 , lowercase : List[Any]=6 , lowercase : int=6 , lowercase : Union[str, Any]=3_7 , lowercase : Union[str, Any]="gelu" , lowercase : List[Any]=0.1 , lowercase : List[str]=0.1 , lowercase : str=5_1_2 , lowercase : Any=1_6 , lowercase : str=2 , lowercase : List[Any]=0.0_2 , lowercase : Tuple=3 , lowercase : Dict=4 , lowercase : Dict=None , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = embedding_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_hidden_groups
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def A ( self : List[str] ) -> str:
'''simple docstring'''
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
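

# For reference, a minimal stand-in for the ids_tensor helper the tester relies
# on above (an illustrative sketch of its contract, not the transformers
# implementation): it draws random token ids of the requested shape, bounded by
# the vocabulary size.
def _demo_ids_tensor(shape, vocab_size):
    import torch

    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)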
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
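

# Sketch of the record layout the parser above assumes (hypothetical sample
# data, not shipped with the script): each DPR record exposes a "question"
# string and a list of "positive_ctxs" dicts carrying "title" fields.
def _demo_record_format():
    record = {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}]}
    titles = [context["title"] for context in record["positive_ctxs"]]
    assert "\t".join(titles) == "Hamlet"
    return record["question"], titles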
from ..utils import DummyObject, requires_backends
# The original class names were lost in this dump; distinct placeholder names
# keep the module importable. Each dummy defers to requires_backends so that
# use without the torch, transformers and onnx backends fails loudly.
class _OnnxDummyA(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyB(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyC(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyD(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyE(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyF(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
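

# For context, a rough sketch of how a DummyObject-style metaclass behaves
# (an illustration under assumptions, not the actual diffusers implementation):
# any attribute access on the class funnels through the backend check, which
# raises an ImportError naming the missing packages.
class _SketchDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")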
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
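

def _demo_threshold():
    # Numeric sketch of the per-concept test above (illustrative values only):
    # a concept is flagged when the image's cosine distance to that concept
    # embedding exceeds the concept's learned threshold, tightened by the
    # special-care adjustment once any sensitive concept has fired.
    concept_cos, concept_threshold, adjustment = 0.23, 0.20, 0.01
    score = round(concept_cos - concept_threshold + adjustment, 3)
    return score > 0  # True: this concept would be flagged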
'''simple docstring'''
g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on a submerged object: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
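

def _demo_buoyancy():
    # Usage sketch (illustrative numbers): two litres fully submerged in water
    # of density ~997 kg/m^3 under standard gravity displaces about 19.55 N.
    force = archimedes_principle(fluid_density=997, volume=0.002)
    assert abs(force - 997 * 9.80665 * 0.002) < 1e-9
    return force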
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps the tokenized sequences used for distillation and cleans them up."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    def print_statistics(self):
        """Print some statistics on the corpus (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
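

def _demo_divide_and_wrap():
    # Illustration of the splitting rule in remove_long_sequences above, with
    # hypothetical special-token ids (cls=0, sep=1) and max_len=6: chunks of
    # max_len - 2 tokens are cut, then re-wrapped so each sub-sequence still
    # starts with cls and ends with sep.
    cls_id, sep_id, max_len = 0, 1, 6
    seq = [cls_id, 5, 6, 7, 8, 9, 10, 11, 12, sep_id]
    chunks = [seq[i : i + max_len - 2] for i in range(0, len(seq), max_len - 2)]
    wrapped = []
    for sub in chunks:
        if sub[0] != cls_id:
            sub = [cls_id] + sub
        if sub[-1] != sep_id:
            sub = sub + [sep_id]
        wrapped.append(sub)
    return wrapped  # [[0, 5, 6, 7, 1], [0, 8, 9, 10, 11, 1], [0, 12, 1]]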
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
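

# Why 4 * odd**2 - 6 * even works: ring i of the spiral has side length
# odd = 2 * i + 1, its top-right corner is odd**2, and the remaining corners
# are odd**2 - 2*i, odd**2 - 4*i and odd**2 - 6*i, so the four corners sum to
# 4 * odd**2 - 12 * i = 4 * odd**2 - 6 * even with even = 2 * i.
def _demo_ring_corners(i: int) -> int:
    odd = 2 * i + 1
    corners = [odd**2 - 2 * i * k for k in range(4)]
    assert sum(corners) == 4 * odd**2 - 6 * (2 * i)
    return sum(corners)  # solution(5) == 1 + _demo_ring_corners(1) + _demo_ring_corners(2) == 101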
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
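

# The arithmetic behind the expected batch sizes asserted above, as a
# standalone sketch (a simplified model of the sharding rule, not accelerate's
# actual implementation): with even_batches=True every process is padded up to
# ceil(dataset_size / num_processes) samples; with even_batches=False the
# remainder goes to the lower-ranked processes.
def _samples_per_process(dataset_size: int, num_processes: int, rank: int, even_batches: bool) -> int:
    if even_batches:
        return -(-dataset_size // num_processes)  # ceiling division
    base, remainder = divmod(dataset_size, num_processes)
    return base + (1 if rank < remainder else 0)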
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f'''Priority {i}: {q}''' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
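

def _demo_drain_order():
    # Small usage sketch: a fixed-priority queue drains priority 0 FIFO before
    # touching priority 1 or 2, regardless of insertion order.
    fpq = FixedPriorityQueue()
    for priority, item in [(2, 1), (0, 10), (1, 70), (0, 100)]:
        fpq.enqueue(priority, item)
    return [fpq.dequeue() for _ in range(4)]  # [10, 100, 70, 1]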
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, 'cosine')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
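

def _demo_precision_at_10():
    # Degenerate sanity check (mirrors the docstring example above): identical
    # English and in-language vectors mean every query retrieves itself, so
    # precision@10 is 1.0.
    vecs = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    return precision_at_10(vecs, vecs)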
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__lowercase , __lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
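
# With the lazy module wired up, importing the package is cheap and the heavy
# submodules only load on first attribute access, e.g. (sketch; the checkpoint
# name is illustrative):
#   from transformers.models.owlvit import OwlViTProcessor      # no torch import yet
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")  # materializes here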
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
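

def _demo_isclose():
    # The tolerance rule used in the fp16 check above: math.isclose passes when
    # |a - b| <= max(rel_tol * max(|a|, |b|), abs_tol).
    import math

    return math.isclose(0.57285, 0.5728, rel_tol=1e-4, abs_tol=1e-4)  # True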
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
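

def _demo_replace_key():
    # Two illustrative renames (hypothetical checkpoint keys): the metadata
    # embedding move and the codebook rename flagged in the comments above.
    assert replace_key("y_emb.weight") == "metadata_embedding.weight"
    assert replace_key("bottleneck.level_blocks.0.k") == "bottleneck.level_blocks.0.codebook"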
def _A ( _a : Union[str, Any] , _a : Union[str, Any] , _a : Union[str, Any] , _a : List[Any] ):
"""simple docstring"""
A = {}
import re
A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_a ):
A = re_encoder_block_conv_in.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] )
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
A = re_encoder_block_conv_in.sub(_a , _a )
elif re_encoder_block_resnet.fullmatch(_a ):
A = re_encoder_block_resnet.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] )
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_encoder_block_resnet.sub(_a , _a )
elif re_encoder_block_proj_out.fullmatch(_a ):
A = re_encoder_block_proj_out.match(_a )
A = regex_match.groups()
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
A = re_encoder_block_proj_out.sub(_a , _a )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_a ):
A = re_decoder_block_conv_out.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] ) - 2
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
A = re_decoder_block_conv_out.sub(_a , _a )
elif re_decoder_block_resnet.fullmatch(_a ):
A = re_decoder_block_resnet.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] ) - 2
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_decoder_block_resnet.sub(_a , _a )
elif re_decoder_block_proj_in.fullmatch(_a ):
A = re_decoder_block_proj_in.match(_a )
A = regex_match.groups()
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
A = re_decoder_block_proj_in.sub(_a , _a )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_a ):
A = re_prior_cond_conv_out.match(_a )
A = regex_match.groups()
A = int(groups[1] ) * 2 + int(groups[2] ) - 2
A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
A = re_prior_cond_conv_out.sub(_a , _a )
elif re_prior_cond_resnet.fullmatch(_a ):
A = re_prior_cond_resnet.match(_a )
A = regex_match.groups()
A = int(groups[1] ) * 2 + int(groups[2] ) - 2
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_prior_cond_resnet.sub(_a , _a )
elif re_prior_cond_proj_in.fullmatch(_a ):
A = re_prior_cond_proj_in.match(_a )
A = regex_match.groups()
A = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
A = re_prior_cond_proj_in.sub(_a , _a )
# keep original key
else:
A = original_key
A = replace_key(_a )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle mismatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
A = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
A = original_key
A = original_key
A = value
return new_dict
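# Worked example of the renaming above (key chosen for illustration): for the
# original key "encoders.0.level_blocks.0.model.1.0.bias", the conv_in regex
# captures groups ("0", "0", "1", "0", "bias"), giving block_index = 1 * 2 + 0 = 2,
# so the key becomes "encoders.0.level_blocks.0.downsample_block.2.bias".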
@torch.no_grad()
def _A ( _a : Optional[Any]=None , _a : str=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
A = requests.get(f'{PREFIX}{file}' , allow_redirects=_a )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=_a )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
A = MODEL_MAPPING[model_name.split("""/""" )[-1]]
A = JukeboxConfig.from_pretrained(_a )
A = JukeboxModel(_a )
A = []
A = {}
for i, dict_name in enumerate(_a ):
A = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""]
A = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
A = old_dic[k]
elif k.endswith(""".w""" ):
A = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A = old_dic[k]
else:
A = old_dic[k]
A = """vqvae""" if i == 0 else f'priors.{3 - i}'
A = fix_jukebox_keys(_a , model.state_dict() , _a , _a )
weight_dict.append(_a )
A = weight_dict.pop(0 )
model.vqvae.load_state_dict(_a )
for i in range(len(_a ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_a ).mkdir(exist_ok=_a )
with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
json.dump(_a , _a )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_a )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase =parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
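# Example invocation (script name and output path are illustrative):
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted
# This downloads the listed checkpoints, remaps their keys with fix_jukebox_keys,
# loads them into the VQ-VAE and the priors, and writes the converted weights
# plus a mapping.json of renamed keys to the output folder.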
| 255
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {"vocab_file": "spiece.model"}
lowerCAmelCase: Union[str, Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class a__( lowerCamelCase__ ):
def __init__( self : Optional[Any] , __snake_case : str , __snake_case : List[str]=False , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=False , __snake_case : List[str]="<s>" , __snake_case : List[str]="</s>" , __snake_case : Dict="<unk>" , __snake_case : Tuple="<sep>" , __snake_case : str="<pad>" , __snake_case : str="<cls>" , __snake_case : str="<mask>" , __snake_case : int=["<eop>", "<eod>"] , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Any , ):
a : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
a : List[Any] = 3
a : List[Any] = do_lower_case
a : Optional[Any] = remove_space
a : Optional[Any] = keep_accents
a : Any = vocab_file
a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.' )
a : Optional[int] = jieba
a : Any = str.maketrans(' \n' , '\u2582\u2583' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowercase_ ( self : Optional[int] ):
return len(self.sp_model )
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
a : Optional[int] = self.__dict__.copy()
a : List[Any] = None
return state
def __setstate__( self : List[Any] , __snake_case : str ):
a : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a : List[str] = {}
a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : Dict , __snake_case : Tuple ):
if self.remove_space:
a : List[Any] = ' '.join(inputs.strip().split() )
else:
a : List[str] = inputs
a : Dict = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
a : str = unicodedata.normalize('NFKD' , __UpperCamelCase )
a : Dict = ''.join([c for c in outputs if not unicodedata.combining(__UpperCamelCase )] )
if self.do_lower_case:
a : Any = outputs.lower()
return outputs
def lowercase_ ( self : str , __snake_case : str ):
a : Optional[Any] = self.preprocess_text(__UpperCamelCase )
a : str = self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
a : Union[str, Any] = []
for piece in pieces:
if len(__UpperCamelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
a : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCamelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a : List[Any] = cur_pieces[1:]
else:
a : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCamelCase )
else:
new_pieces.append(__UpperCamelCase )
return new_pieces
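# Sketch of the digit/comma handling above (piece value is illustrative): if
# SentencePiece emits a piece such as "▁2013,", the trailing comma is stripped,
# "2013" is re-encoded into sub-pieces, and "," is appended as its own piece,
# so numbers followed by commas tokenize consistently.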
def lowercase_ ( self : Any , __snake_case : int ):
return self.sp_model.PieceToId(__UpperCamelCase )
def lowercase_ ( self : List[str] , __snake_case : List[Any] ):
return self.sp_model.IdToPiece(__UpperCamelCase )
def lowercase_ ( self : Union[str, Any] , __snake_case : List[Any] ):
a : Tuple = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip()
return out_string
def lowercase_ ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
a : Dict = [self.sep_token_id]
a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1, 1]
return ([0] * len(__UpperCamelCase )) + [1, 1]
def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
a : List[Any] = [self.sep_token_id]
a : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ ( self : List[str] , __snake_case : str , __snake_case : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Any = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Any ):
a : Any = super()._decode(*__UpperCamelCase , **__UpperCamelCase )
a : Tuple = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' )
return text
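# Round-trip sketch of the CPM whitespace mapping (encode side assumed from the
# translation table built in __init__): " " and "\n" are mapped to "\u2582" and
# "\u2583" so SentencePiece never sees raw whitespace; _decode above strips the
# spaces SentencePiece inserted and maps the two placeholders back.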
| 526
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
_UpperCAmelCase = f'{src_lang}-{tgt_lang}'
_UpperCAmelCase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''README.md''' )
print(f'Generating {path}' )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
# make sure we are under the root of the project
__A : int = Path(__file__).resolve().parent.parent.parent
__A : List[Any] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__A , __A , __A : List[Any] = model_name.split("-")
__A : Optional[Any] = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
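# For example, the "wmt19-ru-en" iteration renders the card and writes it to
# model_cards/facebook/wmt19-ru-en/README.md under the repository root.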
| 602
| 0
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
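# Minimal standalone sketch of the zip-reading pattern used above (archive path
# is hypothetical); every non-directory entry is read and decoded as UTF-8:
#
#   import zipfile
#   with zipfile.ZipFile("artifacts/ci_reports.zip") as z:
#       reports = {
#           name: z.read(name).decode("UTF-8")
#           for name in z.namelist()
#           if not name.endswith("/")
#       }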
| 51
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
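# XOR round trip behind try_key, worked by hand: with key byte ord("a") == 97
# and plaintext byte ord("h") == 104, the cipher byte is 104 ^ 97 == 9, and
# decoding computes 9 ^ 97 == 104 == ord("h") again.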
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , ) -> List[str]:
__UpperCamelCase = size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = apply_ocr
def __lowercase( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase = LayoutLMvaImageProcessingTester(self )
@property
def __lowercase( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'apply_ocr' ) )
def __lowercase( self ) -> int:
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __lowercase( self ) -> str:
pass
def __lowercase( self ) -> List[Any]:
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , _SCREAMING_SNAKE_CASE )
# Test batched
__UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowercase( self ) -> Tuple:
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowercase( self ) -> List[Any]:
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowercase( self ) -> List[Any]:
# with apply_OCR = True
__UpperCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , _SCREAMING_SNAKE_CASE )
# with apply_OCR = False
__UpperCamelCase = LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 383
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_snake_case = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase( cls ) -> int:
__UpperCamelCase = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __lowercase( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__UpperCamelCase = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
__UpperCamelCase = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
__UpperCamelCase = flatten_dict(unfreeze(model.params ) )
__UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='test-model-flax' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
__UpperCamelCase = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
__UpperCamelCase = flatten_dict(unfreeze(model.params ) )
__UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 , msg=f"""{key} not identical""" )
def __lowercase( self ) -> List[Any]:
__UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__UpperCamelCase = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
__UpperCamelCase = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
__UpperCamelCase = flatten_dict(unfreeze(model.params ) )
__UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='valid_org/test-model-flax-org' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
__UpperCamelCase = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
__UpperCamelCase = flatten_dict(unfreeze(model.params ) )
__UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 , msg=f"""{key} not identical""" )
def _a ( __lowercase , __lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = True
__UpperCamelCase = flatten_dict(modela.params )
__UpperCamelCase = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__UpperCamelCase = False
return models_are_equal
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self ) -> List[Any]:
__UpperCamelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
__UpperCamelCase = FlaxBertModel(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __lowercase( self ) -> Union[str, Any]:
__UpperCamelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
__UpperCamelCase = FlaxBertModel(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='10KB' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __lowercase( self ) -> Dict:
__UpperCamelCase = 'bert'
__UpperCamelCase = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> List[str]:
__UpperCamelCase = 'bert'
__UpperCamelCase = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 383
| 1
|
__a = """Alexander Joslin"""
import operator as op
from .stack import Stack
def UpperCamelCase_ ( a_ ) ->int:
A ={"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
A =Stack()
A =Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a_ ) )
elif i in operators:
# RULE 2
operator_stack.push(a_ )
elif i == ")":
# RULE 4
A =operator_stack.peek()
operator_stack.pop()
A =operand_stack.peek()
operand_stack.pop()
A =operand_stack.peek()
operand_stack.pop()
A =operators[opr](a_ , a_ )
operand_stack.push(a_ )
# RULE 5
return operand_stack.peek()
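# Worked trace for "(5 + ((4 * 2) * (2 + 3)))": the first two ")" reduce
# (4 * 2) to 8 and (2 + 3) to 5, the next ")" reduces 8 * 5 to 40, and the
# outer ")" reduces 5 + 40 to 45, which is left on the operand stack.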
if __name__ == "__main__":
__a = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 689
|
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int:
try:
A =int(a_ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A =2
A =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A =i
while n % i == 0:
A =n // i
i += 1
return int(a_ )
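# Worked example: for n = 13195 the loop strips the prime factors 5, 7, 13 and
# finally 29, so 29 is returned; the default 600851475143 yields 6857
# (Project Euler problem 3).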
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
@slow
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__snake_case :Any = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Tuple = TFAutoModel.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Optional[int] = AutoModel.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__snake_case :Union[str, Any] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Tuple = TFAutoModelForPreTraining.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Any = AutoModelForPreTraining.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case :str = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :str = TFAutoModelForCausalLM.from_pretrained(a__ , from_pt=a__ )
__snake_case , __snake_case :str = TFAutoModelForCausalLM.from_pretrained(
a__ , output_loading_info=a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Dict = AutoModelForCausalLM.from_pretrained(a__ , from_tf=a__ )
__snake_case , __snake_case :Optional[int] = AutoModelForCausalLM.from_pretrained(
a__ , output_loading_info=a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case :List[str] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Optional[int] = TFAutoModelWithLMHead.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :int = AutoModelWithLMHead.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case :Tuple = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :List[str] = TFAutoModelForMaskedLM.from_pretrained(a__ , from_pt=a__ )
__snake_case , __snake_case :Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(
a__ , output_loading_info=a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Union[str, Any] = AutoModelForMaskedLM.from_pretrained(a__ , from_tf=a__ )
__snake_case , __snake_case :Optional[int] = AutoModelForMaskedLM.from_pretrained(
a__ , output_loading_info=a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> Any:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case :List[str] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Any = TFAutoModelForSeqaSeqLM.from_pretrained(a__ , from_pt=a__ )
__snake_case , __snake_case :Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
a__ , output_loading_info=a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a__ , from_tf=a__ )
__snake_case , __snake_case :Any = AutoModelForSeqaSeqLM.from_pretrained(
a__ , output_loading_info=a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__snake_case :List[str] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :str = TFAutoModelForSequenceClassification.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Dict = AutoModelForSequenceClassification.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def __lowercase ( self ) -> str:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__snake_case :List[str] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case :str = AutoModelForQuestionAnswering.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :str = TFAutoModelWithLMHead.from_pretrained(a__ , from_pt=a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_44_10 )
__snake_case :int = AutoModelWithLMHead.from_pretrained(a__ , from_tf=a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_44_10 )
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case :List[Any] = TFAutoModelWithLMHead.from_pretrained(a__ , from_pt=a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_44_10 )
__snake_case :int = AutoModelWithLMHead.from_pretrained(a__ , from_tf=a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_44_10 )
| 455
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = """ResNetConfig"""
# Base docstring
lowerCamelCase__ = """microsoft/resnet-50"""
lowerCamelCase__ = [1, 2048, 7, 7]
# Image classification docstring
lowerCamelCase__ = """microsoft/resnet-50"""
lowerCamelCase__ = """tiger cat"""
lowerCamelCase__ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ = 3 , a__ = 1 , a__ = "relu" ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__snake_case :Dict = nn.Convad(
a__ , a__ , kernel_size=a__ , stride=a__ , padding=kernel_size // 2 , bias=a__ )
__snake_case :str = nn.BatchNormad(a__ )
__snake_case :List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowercase ( self , a__ ) -> Tensor:
'''simple docstring'''
__snake_case :int = self.convolution(a__ )
__snake_case :Any = self.normalization(a__ )
__snake_case :Optional[int] = self.activation(a__ )
return hidden_state
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ ) -> str:
'''simple docstring'''
super().__init__()
__snake_case :Dict = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__snake_case :List[Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__snake_case :Tuple = config.num_channels
def __lowercase ( self , a__ ) -> Tensor:
'''simple docstring'''
__snake_case :Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
__snake_case :Optional[int] = self.embedder(a__ )
__snake_case :int = self.pooler(a__ )
return embedding
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ = 2 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__snake_case :Optional[Any] = nn.Convad(a__ , a__ , kernel_size=1 , stride=a__ , bias=a__ )
__snake_case :Tuple = nn.BatchNormad(a__ )
def __lowercase ( self , a__ ) -> Tensor:
'''simple docstring'''
__snake_case :Any = self.convolution(a__ )
__snake_case :str = self.normalization(a__ )
return hidden_state
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ = 1 , a__ = "relu" ) -> List[str]:
'''simple docstring'''
super().__init__()
__snake_case :int = in_channels != out_channels or stride != 1
__snake_case :Tuple = (
ResNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
__snake_case :Optional[int] = nn.Sequential(
ResNetConvLayer(a__ , a__ , stride=a__ ) , ResNetConvLayer(a__ , a__ , activation=a__ ) , )
__snake_case :Union[str, Any] = ACTaFN[activation]
def __lowercase ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
__snake_case :int = hidden_state
__snake_case :Dict = self.layer(a__ )
__snake_case :Any = self.shortcut(a__ )
hidden_state += residual
__snake_case :List[Any] = self.activation(a__ )
return hidden_state
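# Sketch of the residual computation above: for input x the block returns
# activation(layer(x) + shortcut(x)); when in/out channels match and stride is 1
# the shortcut is the identity, otherwise the 1x1 ResNetShortCut projection
# makes the addition shape-compatible.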
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ = 1 , a__ = "relu" , a__ = 4 ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case :Optional[int] = in_channels != out_channels or stride != 1
__snake_case :List[Any] = out_channels // reduction
__snake_case :List[str] = (
ResNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
__snake_case :int = nn.Sequential(
ResNetConvLayer(a__ , a__ , kernel_size=1 ) , ResNetConvLayer(a__ , a__ , stride=a__ ) , ResNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
__snake_case :Dict = ACTaFN[activation]
def __lowercase ( self , a__ ) -> Any:
'''simple docstring'''
__snake_case :List[str] = hidden_state
__snake_case :List[Any] = self.layer(a__ )
__snake_case :List[Any] = self.shortcut(a__ )
hidden_state += residual
__snake_case :Optional[Any] = self.activation(a__ )
return hidden_state
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , ) -> Any:
'''simple docstring'''
super().__init__()
__snake_case :Optional[int] = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
__snake_case :Tuple = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(a__ , a__ , stride=a__ , activation=config.hidden_act ) , *[layer(a__ , a__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def __lowercase ( self , a__ ) -> Tensor:
'''simple docstring'''
__snake_case :Union[str, Any] = input
for layer in self.layers:
__snake_case :str = layer(a__ )
return hidden_state
class snake_case__ ( nn.Module):
'''simple docstring'''
def __init__( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__snake_case :Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case :Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a__ , config.depths[1:] ):
self.stages.append(ResNetStage(a__ , a__ , a__ , depth=a__ ) )
def __lowercase ( self , a__ , a__ = False , a__ = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
__snake_case :Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case :Optional[int] = hidden_states + (hidden_state,)
__snake_case :Tuple = stage_module(a__ )
if output_hidden_states:
__snake_case :Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=a__ , hidden_states=a__ , )
class snake_case__ ( lowercase_):
'''simple docstring'''
lowerCamelCase : List[Any] = ResNetConfig
lowerCamelCase : Optional[Any] = "resnet"
lowerCamelCase : str = "pixel_values"
lowerCamelCase : Optional[int] = True
def __lowercase ( self , a__ ) -> Dict:
'''simple docstring'''
if isinstance(a__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __lowercase ( self , a__ , a__=False ) -> Optional[int]:
'''simple docstring'''
if isinstance(a__ , a__ ):
__snake_case :Union[str, Any] = value
lowerCamelCase__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , lowercase_ , )
class snake_case__ ( lowercase_):
'''simple docstring'''
def __init__( self , a__ ) -> Tuple:
'''simple docstring'''
super().__init__(a__ )
__snake_case :int = config
__snake_case :Any = ResNetEmbeddings(a__ )
__snake_case :Dict = ResNetEncoder(a__ )
__snake_case :Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowercase ( self , a__ , a__ = None , a__ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
__snake_case :List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case :List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case :int = self.embedder(a__ )
__snake_case :Any = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ )
__snake_case :Any = encoder_outputs[0]
__snake_case :int = self.pooler(a__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowercase_ , )
class snake_case__ ( lowercase_):
'''simple docstring'''
def __init__( self , a__ ) -> List[Any]:
'''simple docstring'''
super().__init__(a__ )
__snake_case :Union[str, Any] = config.num_labels
__snake_case :Optional[int] = ResNetModel(a__ )
# classification head
__snake_case :List[str] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def __lowercase ( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> ImageClassifierOutputWithNoAttention:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , lowercase_ , )
class ResNetBackbone( lowercase_ , BackboneMixin):  # second base assumed to be BackboneMixin, as upstream
    '''simple docstring'''
    def __init__( self , config ) -> int:
        '''simple docstring'''
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(a__ )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def __lowercase ( self , pixel_values , output_hidden_states = None , return_dict = None ) -> BackboneOutput:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
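# --- Illustrative usage sketch (not part of the original file) ---
# A minimal way to exercise the classification model above; this mirrors the
# upstream transformers API, and the checkpoint name is an assumed example.
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])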
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilenet_v2'''] = ['''MobileNetV2FeatureExtractor''']
    _import_structure['''image_processing_mobilenet_v2'''] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilenet_v2'''] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
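# --- Illustrative note (not part of the original file) ---
# With the _LazyModule registration above, heavy submodules are imported only on
# first attribute access. Sketch of the intended behavior (package path assumed):
#
#     from transformers.models.mobilenet_v2 import MobileNetV2Config
#     # -> triggers the deferred import of configuration_mobilenet_v2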
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_instructblip'''] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer( self :List[Any] , mname :Tuple ):
        return FSMTTokenizer.from_pretrained(mname )

    def get_model( self :Any , mname :Optional[Any] ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self :Dict , pair :Optional[int] , min_bleu_score :Optional[int] ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
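# --- Illustrative note (not part of the original file) ---
# The parameterized slow test above can be run for a single language pair with
# pytest's -k filter (the test file name is an assumption):
#
#     RUN_SLOW=1 pytest tests/fsmt/test_fsmt_bleu_score.py -k "en_ru"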
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp ) ->Optional[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def is_chinese(word ) ->List[str]:
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char )
        if not _is_chinese_char(char ):
            return 0
    return 1
def get_chinese_word(tokens ) ->Optional[int]:
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens , chinese_word_set ) ->Union[str, Any]:
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = """""".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = """##""" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
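# --- Illustrative example (not part of the original file) ---
# A worked example of add_sub_symbol: when an LTP whole word spans several BERT
# tokens, the continuation tokens get the "##" prefix (tokens are made up):
#
#     add_sub_symbol(["身", "高", "180"], {"身高"})
#     # -> ["身", "##高", "180"]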
def prepare_ref(lines , ltp_tokenizer , bert_tokenizer ) ->Dict:
    ltp_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["""cws"""] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=True , truncation=True , max_length=5_1_2 )
        bert_res.extend(res["""input_ids"""] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ) ->int:
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        data = [json.dumps(ref ) + """\n""" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
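# --- Illustrative usage sketch (not part of the original file) ---
# Typical invocation of this script (the script file name is an assumption; the
# paths are the parser's own defaults):
#
#     python prepare_chinese_ref.py \
#         --file_name=./resources/chinese-demo.txt \
#         --ltp=./resources/ltp \
#         --bert=./resources/robert \
#         --save_path=./resources/ref.txt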
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df : Tuple , partition_order : Optional[Any] ) -> List[Any]:
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed() -> Optional[Any]:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples() -> Union[str, Any]:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable() -> Tuple:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle() -> Tuple:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator" ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard() -> Tuple:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows() -> List[Any]:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS : Optional[List[str]] = None
_NATIVE_BYTEORDER : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES : Any = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "PIL.Image.Image"
    pa_type : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _type : str = field(default='Image' , init=False , repr=False )
def __call__( self : List[Any]) -> List[Any]:
return self.pa_type
    def encode_example( self : Any , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value , list):
            value = np.array(value)
        if isinstance(value , str):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value , PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
    def decode_example( self : Union[str, Any] , value : dict , token_per_repo_id : Dict=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)["repo_id"]
                        token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        token = None
                    with xopen(path , "rb" , use_auth_token=token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten( self : Tuple) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
    def cast_storage( self : str , storage : Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
    def embed_storage( self : List[Any] , storage : pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path , "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
def list_image_compression_formats() -> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image : "PIL.Image.Image" ) -> bytes:
    '''simple docstring'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image : "PIL.Image.Image" ) -> dict:
    '''simple docstring'''
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array : np.ndarray ) -> dict:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
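# --- Illustrative usage sketch (not part of the original file) ---
# A minimal encode/decode round trip with the Image feature defined above; the
# array shape is an arbitrary example:
#
#     import numpy as np
#     feature = Image()
#     encoded = feature.encode_example(np.zeros((8, 8, 3), dtype=np.uint8))  # {"path": None, "bytes": b"..."}
#     pil_image = feature.decode_example(encoded)                            # 8x8 RGB PIL.Image.Image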
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type : Optional[str] = field(
        default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast : bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    data_dir : str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels : Optional[str] = field(
        default=None , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
    max_seq_length : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main( ) -> Any:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions : np.ndarray , label_ids : np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index : List[Any] ) -> Any:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
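# --- Illustrative usage sketch (not part of the original file) ---
# A typical invocation of this script (checkpoint and paths are assumed examples;
# a `tasks.py` defining the NER TokenClassificationTask must be importable):
#
#     python run_ner.py \
#         --model_name_or_path bert-base-cased \
#         --data_dir ./data \
#         --labels ./data/labels.txt \
#         --output_dir ./ner-model \
#         --do_train --do_eval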
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig ):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig(OnnxConfig ):
    @property
    def inputs(self ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
    def generate_dummy_inputs(self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ):
        '''simple docstring'''
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
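# --- Illustrative usage sketch (not part of the original file) ---
# Building a small ImageGPT-style config; the overridden values are arbitrary:
#
#     config = ImageGPTConfig(n_embd=256, n_layer=6, n_head=4)
#     assert config.hidden_size == 256  # resolved through attribute_map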
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self , **kwargs ) -> Tuple:
        config = {
            '''num_train_timesteps''': 1_1_0_0,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ) -> Union[str, Any]:
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ) -> str:
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ) -> List[str]:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ) -> Dict:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1e-2
        assert abs(result_mean.item() - 0.01_31 ) < 1e-3
    def test_full_loop_with_v_prediction( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.00_02 ) < 1e-2
        assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
    def test_full_loop_device( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1e-2
        assert abs(result_mean.item() - 0.01_31 ) < 1e-3
    def test_full_loop_device_karras_sigmas( self ) -> Tuple:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1e-2
        assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1e-3
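# --- Illustrative usage sketch (not part of the original file) ---
# The same scale_model_input/step loop the tests exercise, used with a real
# pipeline; the checkpoint name is an assumed example:
#
#     from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
#     image = pipe("a photo of an astronaut riding a horse").images[0]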
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
SPIECE_UNDERLINE = """▁"""
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
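# --- Illustrative usage sketch (not part of the original file) ---
# Encoding a sequence pair with the fast tokenizer above; note XLNet appends
# special tokens at the end (A <sep> B <sep> <cls>):
#
#     tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     ids = tok("Hello", "world")["input_ids"]  # ends with sep_token_id, cls_token_id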
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch( datasets.Metric ):
    def _info( self ):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""",id="""sequence""" ),
"""references""": datasets.Value("""string""",id="""sequence""" ),
} ),reference_urls=[],)
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 1_00}
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor( ProcessorMixin ):
    attributes = ["""image_processor"""]
    image_processor_class = """SamImageProcessor"""

    def __init__( self , image_processor ):
        '''simple docstring'''
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["""longest_edge"""]
    def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["""original_sizes"""]
        if hasattr(original_sizes , """numpy""" ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
    def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , ):
        '''simple docstring'''
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_size )
                    for point, original_size in zip(input_points , original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points , input_labels = self._pad_points_and_labels(input_points , input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
                    for box, original_size in zip(input_boxes , original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        '''simple docstring'''
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        '''simple docstring'''
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_masks(*args, **kwargs)
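# Hedged sketch (added; standalone arithmetic with hypothetical sizes): the
# coordinate normalization above only rescales (x, y) points from the original
# resolution to the resized resolution whose longest edge equals target_size.
if __name__ == "__main__":
    old_h, old_w, target_size = 480, 640, 1024
    scale = target_size / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    point = np.array([320.0, 240.0])  # (x, y) in the original image
    print(new_h, new_w, point * np.array([new_w / old_w, new_h / old_h]))  # 768 1024 [512. 384.]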
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
'''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
'''simple docstring'''
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
if "vqa" in checkpoint_url:
__magic_name__ : Dict = True
__magic_name__ : List[Any] = 3_1_2_9
__magic_name__ : str = '''huggingface/label-files'''
__magic_name__ : Tuple = '''vqa2-id2label.json'''
__magic_name__ : int = json.load(open(hf_hub_download(__A ,__A ,repo_type='''dataset''' ) ,'''r''' ) )
__magic_name__ : str = {int(__A ): v for k, v in idalabel.items()}
__magic_name__ : Optional[Any] = idalabel
__magic_name__ : Optional[Any] = {v: k for k, v in idalabel.items()}
__magic_name__ : Dict = ViltForQuestionAnswering(__A )
elif "nlvr" in checkpoint_url:
__magic_name__ : Optional[Any] = True
__magic_name__ : Optional[int] = 2
__magic_name__ : Optional[Any] = {0: '''False''', 1: '''True'''}
__magic_name__ : Tuple = {v: k for k, v in config.idalabel.items()}
__magic_name__ : Optional[int] = 3
__magic_name__ : int = ViltForImagesAndTextClassification(__A )
elif "irtr" in checkpoint_url:
__magic_name__ : int = True
__magic_name__ : Dict = ViltForImageAndTextRetrieval(__A )
elif "mlm_itm" in checkpoint_url:
__magic_name__ : Any = True
__magic_name__ : Optional[int] = ViltForMaskedLM(__A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
if mlm_model or irtr_model:
        ignore_keys = ['itm_score.fc.weight', 'itm_score.fc.bias']
        for k in ignore_keys:
            state_dict.pop(k, None)
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == ["mlm_score.decoder.bias"]
else:
        model.load_state_dict(state_dict)
# Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    processor = ViltProcessor(image_processor, tokenizer)
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        image2 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_1 = processor(image1, text, return_tensors='pt')
        encoding_2 = processor(image2, text, return_tensors='pt')
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values)
    else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image, text, return_tensors='pt')
        outputs = model(**encoding)
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        # logits are 2-D here, so a single [0, :3] slice is the correct check
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model and processor to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
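# Example invocation (hedged; the script filename is hypothetical, the URL is the
# argparse default above):
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-converted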
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = 'nat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1E-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
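# Hedged sketch (added; standalone arithmetic): with the default embed_dim=64 and
# four stages, the hidden_size rule above doubles the channel dim per stage.
if __name__ == "__main__":
    embed_dim, depths = 64, [3, 4, 6, 5]
    print(int(embed_dim * 2 ** (len(depths) - 1)))  # 512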
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    """simple docstring"""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """simple docstring"""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width)
    return patches.unsqueeze(0)
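# Hedged demo (added; hypothetical toy shapes): a 3x8x8 image cut into 4x4 patches
# gives a (1, 2, 2, 48) grid -- 2x2 patches, each flattened to 3 * 4 * 4 values.
if __name__ == "__main__":
    demo_image = torch.arange(3 * 8 * 8, dtype=torch.float32).reshape(3, 8, 8)
    print(torch_extract_patches(demo_image, 4, 4).shape)  # torch.Size([1, 2, 2, 48])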
def render_text(text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None) -> Image.Image:
    """simple docstring"""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    """simple docstring"""
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ["flattened_patches"]
    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale such that the resulting rows * columns still fits in max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None, max_patches=None, patch_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
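# Hedged sketch (added; standalone arithmetic with hypothetical sizes): the resize
# in extract_flattened_patches picks the largest scale whose patch grid still fits
# into max_patches.
if __name__ == "__main__":
    image_height, image_width, patch_h, patch_w, max_patches = 480, 640, 16, 16, 2048
    scale = math.sqrt(max_patches * (patch_h / image_height) * (patch_w / image_width))
    rows = max(min(math.floor(scale * image_height / patch_h), max_patches), 1)
    cols = max(min(math.floor(scale * image_width / patch_w), max_patches), 1)
    print(rows, cols, rows * cols <= max_patches)  # 39 52 True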
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int], Callable]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        """simple docstring"""
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
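# Hedged sketch (added; `_DummyConfig` is a hypothetical stand-in): the loop above
# drops any pandas kwarg that still equals its CsvConfig default, so pd.read_csv
# only receives options the user actually changed.
if __name__ == "__main__":
    @dataclass
    class _DummyConfig:
        sep: str = ","
        names: Optional[list] = None
    demo_kwargs = {"sep": ";", "names": None}
    for key in list(demo_kwargs):
        if demo_kwargs[key] == getattr(_DummyConfig(), key):
            del demo_kwargs[key]
    print(demo_kwargs)  # {'sep': ';'}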
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table):
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                raise
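# Hedged demo (added; in-memory CSV with made-up data): _generate_tables reads each
# file in pandas chunks and converts every chunk to a pyarrow Table.
if __name__ == "__main__":
    import io
    reader = pd.read_csv(io.StringIO('a,b\n1,x\n2,y\n3,z\n'), iterator=True, chunksize=2)
    for batch_idx, df in enumerate(reader):
        print(batch_idx, pa.Table.from_pandas(df).num_rows)  # 0 2 / 1 1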
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='trunc'),
            ], axis=1)
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ], dim=2)
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov)
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)))
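# Hedged demo (added): create_pan_cameras builds 20 poses orbiting the origin;
# each camera basis is orthogonal to its view axis and sits 4 units out.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    print(cameras.origin.shape, cameras.x.shape)  # torch.Size([20, 3]) twice
    print(float((cameras.z * cameras.x).sum(dim=-1).abs().max()))  # ~0.0 (orthogonal)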
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
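if __name__ == "__main__":
    # Hedged example (added): the DP table tracks the longest common suffix at each
    # index pair; for these inputs the longest common substring is "abcd".
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"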
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    """simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(timesteps: jnp.ndarray, embedding_dim: int, freq_shift: float = 1, min_timescale: float = 1, max_timescale: float = 1.0e4, flip_sin_to_cos: bool = False, scale: float = 1.0):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """simple docstring"""
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    """simple docstring"""
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
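# Hedged demo (added; toy values): for two timesteps and an even embedding_dim the
# output stacks the sin and cos halves into shape (batch, embedding_dim).
if __name__ == "__main__":
    demo_timesteps = jnp.array([0.0, 10.0])
    print(get_sinusoidal_embeddings(demo_timesteps, embedding_dim=8).shape)  # (2, 8)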
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.')
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext', help='Name of the training. Explore datasets at: hf.co/datasets.')
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.')
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.')
    parser.add_argument(
        '--shard_size', type=int, default=1000, help='Number of entries to go in a single shard.')
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit', default=None, type=int, help='Limit the number of shards (used for debugging).')
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.')
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str, help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.')
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """simple docstring"""
    def fn(examples):
        return tokenizer(examples['text'])
    return fn
def get_serialized_examples(tokenized_data):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        feature = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(F"""Limiting the dataset to {args.limit} entries.""")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, F"""dataset-{shard_count}-{records_containing}.tfrecord""")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(F"""split-{args.split}-records-count.txt""", 'w') as f:
        print(F"""Total {args.split} records: {total_records}""", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
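# Example invocation (hedged; the script filename is hypothetical, the other values
# are the argparse defaults above):
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --shard_size 1000 --split train --output_dir tf-tpu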
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """simple docstring"""
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        '''simple docstring'''
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTVaModel,
            'image-classification': MobileViTVaForImageClassification,
            'image-segmentation': MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    def setUp(self) -> None:
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self) -> None:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self) -> None:
        pass
    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self) -> None:
        pass
    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self) -> None:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self) -> None:
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self) -> None:
        pass
    def test_forward_signature(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self) -> None:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self) -> None:
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self) -> None:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self) -> None:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 167
| 1
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()
    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False, ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
            """temb_channels""": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("""out_channels""")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == """mps""", """Training is not supported in mps""")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
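# Usage sketch (an assumed consumer, mirroring how diffusers' block tests use this
# mixin): a concrete test class only needs to supply `block_class` and `block_type`;
# the mixin's test_output/test_training then exercise the block end to end.
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D  # hypothetical import from diffusers.models
#     block_type = "down"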
| 681
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
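# Brute-force cross-check (a sketch, not part of the original solution): enumerate
# almost-equilateral triangles (a, a, a +/- 1) up to a small perimeter bound and
# test for integral area via Heron's formula, 16 * area^2 = p * (p - 2a)^2 * (p - 2b).
# For small bounds, e.g. 10**4, this should agree with the recurrence-based
# solution() above (both give 3688).
def brute_force_solution(max_perimeter: int = 10**4) -> int:
    from math import isqrt

    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            if perimeter > max_perimeter:
                continue
            sq = perimeter * (perimeter - 2 * a) ** 2 * (perimeter - 2 * b)
            root = isqrt(sq)
            if root * root == sq and root % 4 == 0:  # area = root / 4 is an integer
                total += perimeter
    return total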
| 665
| 0
|
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
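# Equivalent formulation (a sketch for cross-checking the arithmetic loop above):
# the digit sum can also be taken directly over the decimal string of 2**power.
def solution_via_str(power: int = 1_000) -> int:
    return sum(int(digit) for digit in str(2**power))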
| 703
|
def combination_util(arr: list[int], n: int, r: int, index: int, data: list[int], i: int) -> None:
    '''simple docstring'''
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr: list[int], n: int, r: int) -> None:
    '''simple docstring'''
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
UpperCAmelCase_ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
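# Cross-check sketch (not part of the original snippet): the standard library's
# itertools.combinations yields the same r-sized subsets without manual recursion.
from itertools import combinations


def print_combination_itertools(arr: list[int], r: int) -> None:
    for combo in combinations(arr, r):
        print(*combo)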
| 541
| 0
|
from manim import *
class CheckpointToDiskScene(Scene):
    """simple docstring"""
    def construct(self) -> None:
UpperCAmelCase_= Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_= Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase_= Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_= [mem.copy() for i in range(6 )]
UpperCAmelCase_= [mem.copy() for i in range(6 )]
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= Text("""CPU""" , font_size=24 )
UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase_= [mem.copy() for i in range(4 )]
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= Text("""GPU""" , font_size=24 )
UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase_= [mem.copy() for i in range(6 )]
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= Text("""Model""" , font_size=24 )
UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase_= []
UpperCAmelCase_= []
UpperCAmelCase_= []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
UpperCAmelCase_= Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
UpperCAmelCase_= [mem.copy() for i in range(6 )]
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= Text("""Loaded Checkpoint""" , font_size=24 )
UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase_= []
UpperCAmelCase_= []
for i, rect in enumerate(__UpperCAmelCase ):
UpperCAmelCase_= fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
UpperCAmelCase_= target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
UpperCAmelCase_= Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_= MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
UpperCAmelCase_= MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCAmelCase_= [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_= [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase_= Text("""Disk""" , font_size=24 )
UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
UpperCAmelCase_= []
for i, rect in enumerate(__UpperCAmelCase ):
UpperCAmelCase_= rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
UpperCAmelCase_= MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
| 593
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    """simple docstring"""
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""")
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 593
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["""vocab_file"""])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["""spm_file"""])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MaMaaaTokenizer:
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self) -> None:
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], """</s>""")
        self.assertEqual(vocab_keys[1], """<unk>""")
        self.assertEqual(vocab_keys[-1], """<s>""")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("""Skip this test while all models are still to be uploaded.""")
    def test_pretrained_model_lists(self) -> None:
        pass
    def test_full_tokenizer(self) -> None:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, """This is a test""")
    @slow
    def test_tokenizer_integration(self) -> None:
"""simple docstring"""
A__ = {'''input_ids''': [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__, model_name="""facebook/m2m100_418M""", revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""", )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="""en""", tgt_lang="""fr""")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.get_lang_id("""ar"""), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("""en"""), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("""ro"""), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("""mr"""), 128063)
    def test_get_vocab(self) -> None:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["""<unk>"""], 3)
        self.assertIn(self.tokenizer.get_lang_token("""en"""), vocab)
    def test_tokenizer_batch_encode_plus(self) -> None:
        self.tokenizer.src_lang = '''en'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        self.tokenizer.src_lang = '''en'''
        self.tokenizer.tgt_lang = '''fr'''
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="""pt""")
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch["""labels"""], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self) -> None:
        self.tokenizer.src_lang = '''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""mr""")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = '''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""zh""")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self) -> None:
        self.tokenizer.tgt_lang = '''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""mr""")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = '''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("""zh""")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs("""A test""", return_tensors="""pt""", src_lang="""en""", tgt_lang="""ar""")
        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                """input_ids""": [[128022, 58, 4183, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 128006,
            }, )
| 718
|
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Least-significant-digit radix sort for non-negative integers.

    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 247
| 0
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.

Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.

This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    """simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }), codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""], reference_urls=[
                """https://en.wikipedia.org/wiki/ROUGE_(metric)""",
                """https://github.com/google-research/google-research/tree/master/rouge""",
            ], )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
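# Quick usage sketch (assumes the `datasets` and `rouge_score` packages are
# installed): disabling the bootstrap aggregator returns one Score per example
# instead of low/mid/high confidence intervals.
# rouge = datasets.load_metric('rouge')
# results = rouge.compute(
#     predictions=['hello there', 'general kenobi'],
#     references=['hello there', 'general kenobi'],
#     use_aggregator=False,
# )
# print(results['rouge1'])  # [Score(precision=1.0, recall=1.0, fmeasure=1.0), ...]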
| 480
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
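# Closed-form cross-check (a sketch, not in the original): the number of lattice
# paths through an n x n grid is the central binomial coefficient C(2n, n).
def solution_via_comb(n: int = 20) -> int:
    from math import comb

    return comb(2 * n, n)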
| 480
| 1
|
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""", ):
            scheduler.set_timesteps(timesteps=timesteps)
| 256
|
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True    # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    '''simple docstring'''
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
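# Small sanity checks (a sketch, not part of the original solution): 44 -> 4**2 + 4**2 == 32,
# and the chain starting at 10 reaches 1 (10 -> 1), so 10 is a "happy" number.
def _sanity_check() -> None:
    assert next_number(44) == 32
    assert chain(10) is True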
| 256
| 1
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}


class EfficientNetConfig(PretrainedConfig):
    model_type = '''efficientnet'''
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
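# Minimal usage sketch (assumes `transformers` supplies the machinery imported
# above): the derived num_hidden_layers follows directly from num_block_repeats.
if __name__ == "__main__":
    config = EfficientNetConfig()
    print(config.model_type)         # efficientnet
    print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64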
| 60
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase =False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    # Fast tests are not implemented for this pipeline.
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 337
| 0
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F"{num}/{den}")
            den += 1

        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
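# Cross-check sketch (assumed, not part of the original): the four non-trivial
# "curious" fractions are 16/64, 19/95, 26/65 and 49/98; their product reduces
# to 1/100, so solution() returns 100.
def _cross_check() -> None:
    assert fraction_list(2) == ['16/64', '19/95', '26/65', '49/98']
    assert solution() == 100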
| 358
|
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
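# Usage sketch (the adjacency matrix is an assumed example, not from the original):
# a 5-vertex graph with Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]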
| 358
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.')

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(
        F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, """
        F"""16-bits training: {training_args.fp16}""")
    logger.info(F"""Training/evaluation parameters {training_args}""")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)

        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')

        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')

            for key, value in result.items():
                logger.info(F"""  {key} = {value}""")
                writer.write(F"""{key} = {value}\n""")

        results.update(result)

    return results
if __name__ == "__main__":
main()
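A minimal sketch of the `HfArgumentParser` mechanics this script relies on; the `ToyArguments` dataclass and the argument values are hypothetical, for illustration only:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    max_seq_length: int = field(default=128, metadata={"help": "Maximum total input sequence length"})

toy_parser = HfArgumentParser(ToyArguments)
(toy_args,) = toy_parser.parse_args_into_dataclasses(args=["--max_seq_length", "64"])
print(toy_args.max_seq_length)  # 64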
| 66
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
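A quick usage sketch of the processor under test; output shapes follow the `size` dict, just as the assertions above expect (the input image here is a placeholder):

import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])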
| 664
| 0
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Keep zip last, because files can be wrongly detected as zip (i.e. when they are actually tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
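A small end-to-end sketch of the magic-number dispatch above, assuming the module is used inside the `datasets` package (paths are illustrative); the gzip file is created on the fly so the example is self-contained:

import gzip
import os
import tempfile

tmp_dir = tempfile.mkdtemp()
archive = os.path.join(tmp_dir, "sample.txt.gz")
with gzip.open(archive, "wb") as f:
    f.write(b"hello")

fmt = Extractor.infer_extractor_format(archive)  # "gzip", matched via the 0x1F 0x8B magic number
Extractor.extract(archive, os.path.join(tmp_dir, "sample.txt"), extractor_format=fmt)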
| 703
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by swapping two random positions len(data) times."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
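Note that the routine above performs len(data) random pair swaps. For comparison, a sketch of the classical Fisher-Yates loop, which walks from the end and swaps each position with a uniformly chosen index at or before it (an unbiased shuffle):

def classical_fisher_yates(data: list) -> list:
    # iterate from the last index down, swapping with a uniform index <= i
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data

print(classical_fisher_yates(list(range(8))))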
| 11
| 0
|
def mf_knapsack(i, wt, val, w):
    global f  # a global dp table for knapsack
    if f[i][w] < 0:
        if w < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, w)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, w),
                mf_knapsack(i - 1, wt, val, w - wt[i - 1]) + val[i - 1],
            )
        f[i][w] = val
    return f[i][w]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = [3, 2, 4, 4]
SCREAMING_SNAKE_CASE = [4, 3, 2, 3]
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 6
SCREAMING_SNAKE_CASE = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
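One more self-check of the reconstruction logic, with values chosen for illustration: given weights [1, 3, 4], profits [15, 20, 30] and capacity 6, the best value is 45 using items 1 and 3 (1-indexed), since items 2 and 3 together exceed the capacity:

assert knapsack_with_example_solution(6, [1, 3, 4], [15, 20, 30]) == (45, {1, 3})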
| 99
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of lists of integers; it is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
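A hedged sketch of where `DisjunctiveConstraint` is meant to be used: constrained beam search via `generate`. The model name, the forced words, and the prompt are placeholders chosen for illustration:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tok = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# force the output to contain one of several alternative tokenizations
word_ids = [
    tok("Regen", add_special_tokens=False).input_ids,
    tok("regnet", add_special_tokens=False).input_ids,
]
constraint = DisjunctiveConstraint(word_ids)

inputs = tok("translate English to German: it is raining", return_tensors="pt")
out = model.generate(**inputs, constraints=[constraint], num_beams=4)
print(tok.decode(out[0], skip_special_tokens=True))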
| 507
| 0
|
"""simple docstring"""
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices whose in-degree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
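A quick negative check (values chosen for illustration): in a cyclic graph every in-degree stays positive, so the queue starts empty and the cycle branch fires.

topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"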
| 317
|
"""simple docstring"""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
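A small usage sketch with an illustrative adjacency matrix; given 3 available colors the backtracking search returns one valid assignment:

test_graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(test_graph, 3))  # [0, 1, 0, 1, 0]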
| 317
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first wordpiece tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # shrink the window from the right until a vocabulary entry matches
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
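A toy check of the greedy longest-match-first loop above (the vocabulary entries are chosen for illustration):

wp = WordpieceTokenizer(vocab={"abc", "ab", "c", "d"}, unk_token="<unk>")
print(wp.tokenize("abcd"))  # ['abc', 'd'] — the longest prefix wins
print(wp.tokenize("abcx"))  # ['abc', '<unk>']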
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 64
|
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
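The while-loop above is Euclid's GCD algorithm. A worked check: 6.25 becomes 625/100, gcd(625, 100) = 25, so the reduced pair is (25, 4).

assert decimal_to_fraction(6.25) == (25, 4)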
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 443
| 0
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the fraction of predictions that are mathematically equivalent to their reference."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 75
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
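For orientation, a tiny sketch of `get_activation` outside the test harness; `gelu_10` clips activations at plus/minus 10, so the large input below is capped:

import torch
from transformers.activations import get_activation

act = get_activation("gelu_10")
x = torch.tensor([0.0, 100.0])
print(act(x))  # the large input is clipped to 10.0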
| 75
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
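A quick sanity sketch: `attribute_map` lets canonical configuration names resolve to the CTRL-specific ones.

config = CTRLConfig()
print(config.n_embd, config.hidden_size)  # 1280 1280 — hidden_size maps to n_embd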
| 89
|
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return all character n-grams of the given size from ``sentence``."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__A ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A ={
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A ={
"unc-nlp/lxmert-base-uncased": 5_1_2,
}
__A ={
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = LxmertTokenizer
def __init__( self : Optional[int] , a_ : Optional[int]=None , a_ : List[Any]=None , a_ : List[Any]=True , a_ : List[str]="[UNK]" , a_ : int="[SEP]" , a_ : Optional[Any]="[PAD]" , a_ : int="[CLS]" , a_ : int="[MASK]" , a_ : Optional[Any]=True , a_ : Tuple=None , **a_ : Dict , ):
'''simple docstring'''
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , )
__UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , a_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , a_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , a_ ) != tokenize_chinese_chars
):
__UpperCAmelCase : Optional[int] = getattr(a_ , normalizer_state.pop('''type''' ) )
__UpperCAmelCase : List[str] = do_lower_case
__UpperCAmelCase : Union[str, Any] = strip_accents
__UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars
__UpperCAmelCase : str = normalizer_class(**a_ )
__UpperCAmelCase : List[Any] = do_lower_case
def snake_case__ ( self : Optional[Any] , a_ : Optional[int] , a_ : str=None ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : str = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Dict , a_ : str , a_ : Optional[str] = None ):
'''simple docstring'''
__UpperCAmelCase : Dict = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
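A hedged usage sketch of the segment-id helper above; it requires downloading the checkpoint referenced in `PRETRAINED_VOCAB_FILES_MAP`:

tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
ids_a = tok("who is there", add_special_tokens=False).input_ids
ids_b = tok("nobody", add_special_tokens=False).input_ids
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))
# 0s over [CLS] + first segment + [SEP], then 1s over second segment + [SEP]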
| 702
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
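The greedy argument above assumes activities are already sorted by finish time, as in this demo. Unsorted inputs can be pre-sorted first, for example:

    order = sorted(range(len(finish)), key=lambda k: finish[k])
    start_sorted = [start[k] for k in order]
    finish_sorted = [finish[k] for k in order]
    print_max_activities(start_sorted, finish_sorted)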
| 241
| 0
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ :Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ :Any = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class A( lowerCamelCase__ ):
"""simple docstring"""
A = "align_text_model"
def __init__( self , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> str:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
_UpperCamelCase :str = vocab_size
_UpperCamelCase :str = hidden_size
_UpperCamelCase :List[Any] = num_hidden_layers
_UpperCamelCase :Dict = num_attention_heads
_UpperCamelCase :Any = hidden_act
_UpperCamelCase :Any = intermediate_size
_UpperCamelCase :int = hidden_dropout_prob
_UpperCamelCase :int = attention_probs_dropout_prob
_UpperCamelCase :str = max_position_embeddings
_UpperCamelCase :str = type_vocab_size
_UpperCamelCase :str = initializer_range
_UpperCamelCase :List[Any] = layer_norm_eps
_UpperCamelCase :List[Any] = position_embedding_type
_UpperCamelCase :int = use_cache
_UpperCamelCase :List[Any] = pad_token_id
@classmethod
def _UpperCamelCase( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
_UpperCamelCase , _UpperCamelCase :Optional[int] = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
_UpperCamelCase :Optional[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class A( lowerCamelCase__ ):
"""simple docstring"""
A = "align_vision_model"
def __init__( self , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 6_00 , SCREAMING_SNAKE_CASE__ = 2.0 , SCREAMING_SNAKE_CASE__ = 3.1 , SCREAMING_SNAKE_CASE__ = 8 , SCREAMING_SNAKE_CASE__ = [3, 3, 5, 3, 5, 5, 3] , SCREAMING_SNAKE_CASE__ = [32, 16, 24, 40, 80, 1_12, 1_92] , SCREAMING_SNAKE_CASE__ = [16, 24, 40, 80, 1_12, 1_92, 3_20] , SCREAMING_SNAKE_CASE__ = [] , SCREAMING_SNAKE_CASE__ = [1, 2, 2, 2, 1, 2, 1] , SCREAMING_SNAKE_CASE__ = [1, 2, 2, 3, 3, 4, 1] , SCREAMING_SNAKE_CASE__ = [1, 6, 6, 6, 6, 6, 6] , SCREAMING_SNAKE_CASE__ = 0.2_5 , SCREAMING_SNAKE_CASE__ = "swish" , SCREAMING_SNAKE_CASE__ = 25_60 , SCREAMING_SNAKE_CASE__ = "mean" , SCREAMING_SNAKE_CASE__ = 0.0_2 , SCREAMING_SNAKE_CASE__ = 0.0_0_1 , SCREAMING_SNAKE_CASE__ = 0.9_9 , SCREAMING_SNAKE_CASE__ = 0.2 , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
_UpperCamelCase :Any = num_channels
_UpperCamelCase :Optional[Any] = image_size
_UpperCamelCase :Dict = width_coefficient
_UpperCamelCase :int = depth_coefficient
_UpperCamelCase :List[Any] = depth_divisor
_UpperCamelCase :Optional[Any] = kernel_sizes
_UpperCamelCase :Optional[int] = in_channels
_UpperCamelCase :Dict = out_channels
_UpperCamelCase :Union[str, Any] = depthwise_padding
_UpperCamelCase :List[Any] = strides
_UpperCamelCase :str = num_block_repeats
_UpperCamelCase :str = expand_ratios
_UpperCamelCase :Optional[Any] = squeeze_expansion_ratio
_UpperCamelCase :List[Any] = hidden_act
_UpperCamelCase :List[str] = hidden_dim
_UpperCamelCase :List[str] = pooling_type
_UpperCamelCase :Union[str, Any] = initializer_range
_UpperCamelCase :int = batch_norm_eps
_UpperCamelCase :Dict = batch_norm_momentum
_UpperCamelCase :Optional[int] = drop_connect_rate
_UpperCamelCase :Tuple = sum(SCREAMING_SNAKE_CASE_ ) * 4
@classmethod
def _UpperCamelCase( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
_UpperCamelCase , _UpperCamelCase :Any = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
_UpperCamelCase :Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class A( lowerCamelCase__ ):
"""simple docstring"""
A = "align"
A = True
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=6_40 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=0.0_2 , **SCREAMING_SNAKE_CASE__ , ) -> List[str]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
if text_config is None:
_UpperCamelCase :int = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
_UpperCamelCase :Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
_UpperCamelCase :Any = AlignTextConfig(**SCREAMING_SNAKE_CASE_ )
_UpperCamelCase :Any = AlignVisionConfig(**SCREAMING_SNAKE_CASE_ )
_UpperCamelCase :Dict = projection_dim
_UpperCamelCase :Optional[int] = temperature_init_value
_UpperCamelCase :Dict = initializer_range
@classmethod
def _UpperCamelCase( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[Any] = copy.deepcopy(self.__dict__ )
_UpperCamelCase :List[Any] = self.text_config.to_dict()
_UpperCamelCase :Optional[Any] = self.vision_config.to_dict()
_UpperCamelCase :str = self.__class__.model_type
return output
| 355
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase_ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowercase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase_ = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
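

# Illustrative note (added): the literal checks and the regex above together
# catch both single-line and multi-line usages of an attribute, e.g.
#     config.hidden_size
# and
#     getattr(
#         config, "hidden_size", 768
#     )
# (`hidden_size` is only an example attribute name, not taken from this file).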

def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)

def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 562
| 0
|
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation (with 0b prefix)."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
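

# Hand-checked examples (added for illustration):
#     decimal_to_binary(5)  -> "0b101"
#     decimal_to_binary(-2) -> "-0b10"
#     decimal_to_binary(0)  -> "0b0"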
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()

def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)

T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks

class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 311
| 0
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
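
    # Worked example (added): for the 11-byte message b"Test String", the
    # padding is 1 marker byte plus 63 - (11 + 8) % 64 = 44 zero bytes; with
    # the 8-byte length field the total is 11 + 1 + 44 + 8 = 64 bytes, i.e.
    # exactly one full SHA-256 block.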

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00

                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate `value` by `rotations` bits within a 32-bit word."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
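
    # Example (added): ror(0x00000001, 1) == 0x80000000 -- the low bit wraps
    # around to the top of the 32-bit word.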


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a string (default) or the contents of a file with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 163
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 163
| 1
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Return the `topk` most likely completions of the single <mask> token.
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
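

# Added note: each returned tuple is (filled_sentence, probability,
# predicted_token), so the call below prints the three most likely
# completions of the masked CamemBERT input.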
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 455
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LXMERT model."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 455
| 1
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Project Euler 71: find the numerator of the largest fraction strictly left
    of numerator/denominator among fractions with denominators up to `limit`.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
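

# Hand-checked example (added): solution(numerator=3, denominator=7, limit=8)
# returns 2, since 2/5 is the closest fraction below 3/7 with denominator <= 8.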
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 638
|
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk does not descend into these directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")

def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"
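

# Examples (added): md_prefix(0) -> "\n##" (a section heading), while
# md_prefix(2) -> "    *" (a bullet indented two levels).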

def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path

def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
| 638
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the sample tree: 1 at the root, 2/3 as children, 4/5 under 2."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree

def preorder(root: Node | None) -> Sequence[Node | None]:
    """Pre-order traversal: root, left, right."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> Sequence[Node | None]:
    """Post-order traversal: left, right, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> Sequence[Node | None]:
    """In-order traversal: left, root, right."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree (number of levels)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0

def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal, level by level."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
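

# Worked example (added): for the tree built by make_tree(),
# level_order(make_tree()) returns [1, 2, 3, 4, 5].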

def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the nodes of one level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output

def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the nodes of one level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output

def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the direction on each level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
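

# Worked example (added): for the tree built by make_tree(), zigzag alternates
# direction per level and returns [[1], [3, 2], [4, 5]].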

def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 548
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 245
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits of the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
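

# Example (added): str_eval("123") == 1 * 2 * 3 == 6.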

def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
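

# Added note: for the 1000-digit number above, the expected result
# (the well-known Project Euler 8 answer) is 23514624000.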
if __name__ == "__main__":
print(f"{solution() = }")
| 294
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
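

# Usage sketch (added; assumes an active SparkSession, names are illustrative):
#     df = spark.createDataFrame(...)
#     dataset = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()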
| 136
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 93
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset and save each split as .source/.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 623
|
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 623
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 103
|
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: outputs 1 only when both inputs are equal."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 103
| 1
|
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` units can be filled with unit gaps and
    tiles of length 2, 3 or 4 (Project Euler 117).
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
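

# Hand-checked example (added): solution(5) == 15, matching the Project Euler
# problem 117 example for a row of five units.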
if __name__ == "__main__":
print(f"""{solution() = }""")
| 182
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
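# A minimal usage sketch (dataset values invented for illustration): setting
# the format to "jax" routes row/column/batch extraction through the
# formatter above and yields jax.numpy arrays.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#     ds = ds.with_format("jax")
#     ds[0]["x"]  # -> jnp array of dtype float32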
| 182
| 1
|
def longest_distance(graph):
    """Print the longest distance in a DAG using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
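# Worked example (traced by hand): the longest chain in the graph above is
# 0 -> 2 -> 5 -> 6 -> 7, which touches five vertices, so the call prints 5.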
| 484
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
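# Usage sketch via python-fire (script and checkpoint names are illustrative):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path model_fp16.bin
#
# Omitting --save_path halves the checkpoint in place, overwriting src_path.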
| 484
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 705
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression, with integer division
    truncating toward zero."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
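    # Worked examples: "2 3 + 4 *" is postfix for (2 + 3) * 4, and division
    # truncates toward zero, so -7 / 2 evaluates to -3 rather than floor's -4.
    assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20
    assert evaluate_postfix(["-7", "2", "/"]) == -3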
| 345
| 0
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, i.e. count the reduced
    proper fractions with denominator at most `limit` (Project Euler 72)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
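# A minimal usage sketch (image path invented for illustration):
#
#     from PIL import Image
#     captioner = ImageCaptioningTool()
#     caption = captioner(Image.open("photo.jpg"))  # e.g. "a dog lying on a couch"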
| 98
| 0
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_mini_metrics():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 353
|
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the `max_n`-th convergent of the
    continued fraction for e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'{solution() = }')
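    # Hand-checked example: the tenth convergent of e is 1457 / 536, and
    # sum_digits(1457) = 1 + 4 + 5 + 7 = 17.
    assert solution(10) == 17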
| 353
| 1
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
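# A minimal instantiation sketch (sizes chosen arbitrarily for illustration):
# a prefix whose inner dim already equals n_embd needs no hidden projection.
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#     input_ids = torch.randint(0, 50257, (1, 16))
#     prefix = torch.randn(1, 77, 768)
#     out = decoder(input_ids, prefix)  # GPT-2 logits over the 77 + 16 positions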
| 678
|
from manim import *
# Scene illustrating checkpoint weights being offloaded to disk.
class Stage6(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""",
            font_size=24,
        )

        step_6.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])

        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_6))

        step_7 = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=24)
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
| 678
| 1
|
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Repeatedly merge the two cheapest files; return the total merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
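    # Worked example: for files [2, 3, 4], merge 2 + 3 first (cost 5), then
    # 5 + 4 (cost 9), for a minimal total cost of 14.
    assert optimal_merge_pattern([2, 3, 4]) == 14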
| 466
|
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 466
| 1
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # Advance the sequence a(m + 1) = a(m) + digitsum(a(m)), writing terms as
    # a(m) = b * 10^k + c and jumping over cached stretches where possible.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    # Compute sequential terms in place until the n-th term is reached or the
    # low k digits overflow into b.
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # Add `addend` into the little-endian digit list, starting at position k.
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    # Returns a(n) for a(1) = 1 and a(m + 1) = a(m) + digitsum(a(m)).
    digits = [1]
    i = 1
    dn = 0
    while True:
        _diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
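    # Brute-force cross-check of the memoised jump search for a small index,
    # using the defining recurrence a(1) = 1, a(m + 1) = a(m) + digitsum(a(m)).
    def naive_solution(n: int) -> int:
        a = 1
        for _ in range(n - 1):
            a += sum(int(digit) for digit in str(a))
        return a

    assert solution(20) == naive_solution(20)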
| 45
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 100_0000) -> int:
    """Longest sum of consecutive primes below `ceiling` that is itself prime
    (Project Euler 50)."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
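    # Known small case: below one hundred the longest run of consecutive
    # primes summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41.
    assert solution(100) == 41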
| 45
| 1
|
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    # Return True if the sink t is reachable from the source s in the
    # residual graph; parent[] records the path found.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS and used to recover the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the chosen path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
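# The BFS-based augmenting-path search above makes this the Edmonds-Karp
# variant of Ford-Fulkerson, running in O(V * E^2); the sample network is the
# classic CLRS example, whose maximum flow is 23. A tiny hand-checkable case:
# a single chain 0 -> 1 -> 2 with capacities 3 and 2 can carry only 2 units.
assert ford_fulkerson([[0, 3, 0], [0, 0, 2], [0, 0, 0]], 0, 2) == 2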
| 621
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 621
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 109
|
def solution(length: int = 50) -> int:
    """Count the ways to replace grey unit tiles in a row with coloured tiles
    of one fixed length (2, 3 or 4), using at least one coloured tile
    (Project Euler 116)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
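# A minimal usage sketch (feature names invented for illustration):
#
#     from datasets import ClassLabel, Features, Image
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     template = ImageClassification(image_column="image", label_column="labels")
#     template = template.align_with_features(features)  # label schema now has the names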
| 716
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : float ) -> float:
"""simple docstring"""
return 0.0
def _A( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int ) -> tuple[int | float, int | float]:
'''simple docstring'''
__lowercase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
__lowercase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
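# Minimal usage sketch (illustrative; `IdentityFilter` is not part of this module):
#
#   class IdentityFilter:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(IdentityFilter(), 48000)  # flat 0 dB magnitude line
#   show_phase_response(IdentityFilter(), 48000)      # zero phase shift everywhere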
| 362
| 0
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 515
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class a__ ( PretrainedConfig ):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
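    # Usage sketch (illustrative): `config = a__()` builds the default
    # audio-spectrogram-transformer configuration; `frequency_stride` and
    # `time_stride` control how densely 16x16 patches tile the
    # (num_mel_bins, max_length) spectrogram.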
| 515
| 1
|
_lowercase : List[Any] ={
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
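# Pinned dependency table (PEP 508 version specifiers). The assumption here -- not
# shown in this excerpt -- is that a mapping like this is generated from setup.py's
# `_deps` list and consumed via lookups such as `deps["torch"]` when assembling
# install_requires and extras.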
| 661
|
import math
def prime_sieve(n: int) -> list:
    # Odd-only sieve of Eratosthenes returning all primes below n.
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 661
| 1
|
def hex_to_bin(hex_num: str) -> int:
    '''simple docstring'''
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    '''simple docstring'''
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    '''simple docstring'''
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    '''simple docstring'''
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph for the second pass of Kosaraju's algorithm.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
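# Illustrative expectation (a sketch; exact list ordering depends on traversal
# order): in test_graph_1 above, 0 -> 2 -> 1 -> 0 forms a cycle, so the result
# groups {0, 1, 2} into one component, with [3] and [4] as singletons.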
| 306
| 1
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    # Invariant: sequence[:low] holds colors[0], sequence[low:mid] holds
    # colors[1], and sequence[high + 1:] holds colors[2].
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F'{dutch_national_flag_sort(unsorted)}')
| 718
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
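# Sanity check (illustrative): with the default inputs 1 + 1 + 1 = 3 = 0b11, so the
# returned counts should concentrate on the bitstring "11" (carry bit and sum bit).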
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 641
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        """simple docstring"""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # DETR resizes so the shorter edge matches size["shortest_edge"],
            # preserving the aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 108
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class a_ ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
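    # Worked example of the small-size branch above (illustrative): with
    # size={"shortest_edge": 224} and the default crop_pct = 224/256, the image is
    # first resized so its short side is int(224 / (224/256)) = 256 pixels and
    # then center-cropped to 224x224 -- the standard "resize 256, crop 224" recipe.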
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 314
| 0
|
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 713
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 225
| 0
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
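# triplet_sum2 relies on the classic two-pointer scan: once `arr` is sorted,
# advancing `left` raises the candidate sum and retreating `right` lowers it,
# giving O(n^2) overall versus the O(n^3) permutation search above.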
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 314
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class a_ ( DeiTImageProcessor ):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 314
| 1
|
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
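# Illustrative matches for the pattern above (not from the original module):
# "+91-9876543210", "09876543210" and "919876543210" all validate; numbers whose
# first significant digit is not 7, 8 or 9 do not.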
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 702
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b]; returns True once applied."""
        if self.flag[idx] is True:
            # push the pending lazy value down before descending
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
_lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_lowerCAmelCase = 15
_lowerCAmelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
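# Editor's summary (not in the original file): with lazy propagation both the
# range-assignment `update` and the range-max `query` run in O(log n); pending
# assignments live in `lazy`/`flag` and are pushed one level down only when a
# node is next visited, instead of eagerly rewriting every leaf in the range.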
| 71
| 0
|
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
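# Editor's note (an observation, not original text): this is a standard A* search on a
# 4-connected grid -- `cell` holds [f, g, x, y] entries where f = g + heuristic, and the
# heuristic is Manhattan distance with an extra penalty of 99 on obstacle cells so the
# expansion steers around them.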
| 295
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
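# Minimal usage sketch (editor's assumption about the PipelineTool API; tools are
# callable and lazily load their checkpoint on first use):
#
#   tool = TextSummarizationTool()
#   summary = tool("A very long meeting transcript ...")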
| 295
| 1
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Fractional knapsack: take items (the last one possibly partially) in
    decreasing profit/weight order until `max_weight` is reached, and return
    the total profit gained."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call
    print(calc_profit(profit, weight, max_weight))
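# Worked example (editor's trace of the algorithm above, not an original doctest):
#   calc_profit([1, 2, 3], [3, 4, 5], 15)
# visits the items in decreasing profit/weight order (0.6, 0.5, 0.33...); all three
# fit fully for a total weight of 12 <= 15, so the call returns 1 + 2 + 3 = 6.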
| 287
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
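# Editor's note: this follows the standard transformers lazy-import pattern --
# `_import_structure` maps each submodule to its public names, and at import time
# the module object is replaced by a `_LazyModule` that only imports a submodule
# on first attribute access (so e.g. torch is not pulled in unless a model class
# is actually used).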
| 287
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
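# Minimal usage sketch (the checkpoint name is an editor's assumption, not taken
# from this file):
#
#   from PIL import Image
#   import requests
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")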
| 602
|
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 602
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
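# Example invocation (model identifiers are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint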
| 29
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 1
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi  # reduce theta into [-2*pi, 2*pi] so the series converges quickly
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
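# Sanity check against the standard library (editor's illustration; with range
# reduction, the default 30 terms are far more than enough for this tolerance):
#   from math import sin, cos
#   assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
#   assert abs(maclaurin_cos(10) - cos(10)) < 1e-9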
| 37
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
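# Typical invocations from a shell (these subcommands are registered above):
#   accelerate config               # interactive configuration
#   accelerate launch train.py ...  # run a script with the configured setup
#   accelerate env                  # print environment info for bug reports
#   accelerate test                 # sanity-check the current configuration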
| 469
| 0
|
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
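# Example invocation (paths are illustrative):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti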
| 314
| 0
|