| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86-54.5k chars) | int64 (0-371) | string (87-49.2k chars) | int64 (0-349) | int64 (0-1) |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
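# Added usage sketch (illustrative, not part of the original file): with
# `transformers` installed, the same config is exposed via the top-level API.
# >>> from transformers import PegasusConfig
# >>> cfg = PegasusConfig(d_model=64, encoder_layers=2, decoder_layers=2)
# >>> cfg.hidden_size  # routed to d_model through attribute_map
# 64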
| 37 |
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> str:
SCREAMING_SNAKE_CASE = int(SCREAMING_SNAKE_CASE_ )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(SCREAMING_SNAKE_CASE_ , 2 )
return binary_recursive(SCREAMING_SNAKE_CASE_ ) + str(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> str:
SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ).strip()
if not number:
raise ValueError('No input value was provided' )
SCREAMING_SNAKE_CASE = '-' if number.startswith('-' ) else ''
SCREAMING_SNAKE_CASE = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return F'{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE_ ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
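# Added usage sketch (illustrative): a couple of quick checks of the
# conversion defined above.
if __name__ == "__main__":
    assert main("11") == "0b1011"
    assert main("-31") == "-0b11111"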
| 113 | 0 |
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 350 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self) -> Dataset:
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
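# Added usage sketch (illustrative): this writer backs the public
# `Dataset.to_sql` API; with `datasets` installed, the user-facing call looks
# roughly like this.
# >>> import sqlite3
# >>> from datasets import Dataset
# >>> ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
# >>> ds.to_sql("my_table", sqlite3.connect(":memory:"))  # rows written
# 2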
| 338 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 254 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
args = parser.parse_args()
main(args)
| 14 | 0 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return the activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
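# Added usage sketch (illustrative): resolving an activation module by name.
if __name__ == "__main__":
    import torch

    act = get_activation("gelu")
    print(act(torch.zeros(3)))  # GELU(0) = 0 -> tensor([0., 0., 0.])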
| 371 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 192 | 0 |
def count_divisors(n):
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
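# Added check (illustrative): 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6
# divisors (1, 2, 4, 7, 14, 28).
if __name__ == "__main__":
    assert count_divisors(28) == 6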
| 299 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
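# Added sketch (illustrative): the same transform on a synthetic NumPy array,
# so no image file is needed.
# >>> import numpy as np
# >>> convert_to_negative(np.zeros((1, 1, 3), dtype=np.uint8))
# array([[[255, 255, 255]]], dtype=uint8)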
| 299 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 56 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
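# Added check (illustrative): the primes below 10 are 2, 3, 5 and 7,
# which sum to 17.
if __name__ == "__main__":
    assert solution(10) == 17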
| 56 | 1 |
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # does this substring end after the previously explored end (that is r)?
        # if yes, update r to the last index of this substring
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
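# Added check (illustrative): the longest palindromic substring of
# "abbbaba" is "abbba".
if __name__ == "__main__":
    assert palindromic_string("abbbaba") == "abbba"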
| 52 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
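# Added usage sketch (illustrative): the default template maps dataset columns
# onto the schema one-to-one.
# >>> QuestionAnsweringExtractive().column_mapping
# {'question': 'question', 'context': 'context', 'answers': 'answers'}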
| 121 | 0 |
'''simple docstring'''
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
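# Added usage sketch (illustrative): splitting 12 layers evenly across
# 3 devices gives 4 consecutive layers per device.
if __name__ == "__main__":
    assert get_device_map(12, [0, 1, 2]) == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}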
| 114 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 114 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 201 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 338 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 73 | 0 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
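    # Worked example (single-character tokens only, matching the tokenizer
    # above): infix_2_prefix("a+b*(c^d-e)") reverses the input to
    # "(e-d^c)*b+a", converts that to the postfix "edc^-b*a+", and reverses
    # once more, yielding the prefix expression "+a*b-^cde".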
| 39
|
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
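    # Note: the three-argument pow(2, 7830457, modulus) performs modular
    # exponentiation by repeated squaring, so only the last n digits are ever
    # held in memory; materialising 2**7830457 (roughly 2.4 million decimal
    # digits) first would give the same answer far more slowly.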
| 192
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 244
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest element (1-indexed) of a list of numbers.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
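# Usage sketch (1-indexed k; expected linear time, worst-case quadratic when
# pivots are unlucky; assumes distinct elements, since values equal to the
# pivot are dropped by the partition above):
#
#   kth_number([2, 1, 3, 4, 5], 3)  # -> 3
#   kth_number([2, 1, 3, 4, 5], 1)  # -> 1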
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244
| 1
|
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def A_ ( self : str ):
snake_case_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
snake_case_ = self.get_dummy_dataset()
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : int ):
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : int ):
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : str ):
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : Any ):
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : Any ):
snake_case_ = 1
snake_case_ = self.get_dummy_legacy_index_retriever()
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : int ):
snake_case_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : List[str] ):
import torch
snake_case_ = 1
snake_case_ = self.get_dummy_canonical_hf_index_retriever()
snake_case_ = [[5, 7], [10, 11]]
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
snake_case_ ,snake_case_ ,snake_case_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , np.ndarray )
snake_case_ = retriever(
lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors='''pt''' , )
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : Tuple ):
snake_case_ = self.get_dpr_ctx_encoder_tokenizer()
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
retriever.set_ctx_encoder_tokenizer(lowercase_ )
snake_case_ = [[5, 7], [10, 11]]
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
self.assertEqual(
len(lowercase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowercase_ ) # check for doc token related keys in dictionary.
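# Rough real-world counterpart of the dummy retrievers above (a sketch, not
# part of the test suite; it assumes the "facebook/rag-token-nq" index can be
# downloaded):
#
#   retriever = RagRetriever.from_pretrained("facebook/rag-token-nq")
#   retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)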
| 56
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal: returns the size of the subtree rooted at `start` and
    records vertices whose subtree has an even number of nodes as cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the traversal from the root (vertex 1)."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
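    # For the sample edge list above this prints 2: cutting edges (1, 3) and
    # (1, 6) is the maximum number of removable edges that leaves every
    # resulting component with an even number of vertices.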
| 56
| 1
|
def is_even(number: int) -> bool:
    return number & 1 == 0
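# Even numbers have a binary representation ending in 0, so `number & 1` is 0
# exactly for even inputs, e.g. is_even(4) -> True, is_even(7) -> False.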
if __name__ == "__main__":
import doctest
doctest.testmod()
| 125
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
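# Standalone sanity check mirroring the integration tests above (a sketch;
# assumes the facebook/esm2_t6_8M_UR50D weights are reachable):
#
#   model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 33)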
| 125
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : int = None
__UpperCAmelCase : Union[str, Any] = 20
__UpperCAmelCase : Dict = self._get_uniform_logits(batch_size=2 , length=__lowercase )
# tweak scores to not be uniform anymore
__UpperCAmelCase : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__UpperCAmelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__UpperCAmelCase : Any = jax.nn.softmax(__lowercase , axis=-1 )
__UpperCAmelCase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
__UpperCAmelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__lowercase , scores.copy() , cur_len=__lowercase ) , axis=-1 )
__UpperCAmelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(__lowercase , scores.copy() , cur_len=__lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__UpperCAmelCase : int = None
__UpperCAmelCase : int = 10
__UpperCAmelCase : Optional[Any] = 2
# create ramp distribution
__UpperCAmelCase : str = np.broadcast_to(np.arange(__lowercase )[None, :] , (batch_size, vocab_size) ).copy()
__UpperCAmelCase : Any = ramp_logits[1:, : vocab_size // 2] + vocab_size
__UpperCAmelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
__UpperCAmelCase : Optional[int] = top_k_warp(__lowercase , __lowercase , cur_len=__lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__UpperCAmelCase : List[Any] = 5
__UpperCAmelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__UpperCAmelCase : Union[str, Any] = np.broadcast_to(np.arange(__lowercase )[None, :] , (batch_size, length) ).copy()
__UpperCAmelCase : str = top_k_warp_safety_check(__lowercase , __lowercase , cur_len=__lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
__UpperCAmelCase : Any = None
__UpperCAmelCase : List[Any] = 10
__UpperCAmelCase : str = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__UpperCAmelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__UpperCAmelCase : str = FlaxTopPLogitsWarper(0.8 )
__UpperCAmelCase : Tuple = np.exp(top_p_warp(__lowercase , __lowercase , cur_len=__lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__UpperCAmelCase : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__UpperCAmelCase : str = np.broadcast_to(np.arange(__lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__UpperCAmelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__UpperCAmelCase : Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__UpperCAmelCase : Dict = top_p_warp(__lowercase , __lowercase , cur_len=__lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase : int = 20
__UpperCAmelCase : Optional[Any] = 4
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowercase )
# check that min length is applied at length 5
__UpperCAmelCase : Tuple = ids_tensor((batch_size, 20) , vocab_size=20 )
__UpperCAmelCase : Dict = 5
__UpperCAmelCase : List[str] = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = min_dist_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : int = 15
__UpperCAmelCase : int = min_dist_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertFalse(jnp.isinf(__lowercase ).any() )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = 20
__UpperCAmelCase : Union[str, Any] = 4
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowercase )
# check that all scores are -inf except the bos_token_id score
__UpperCAmelCase : List[str] = ids_tensor((batch_size, 1) , vocab_size=20 )
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : str = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : Dict = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : Optional[int] = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : int = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertFalse(jnp.isinf(__lowercase ).any() )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = 20
__UpperCAmelCase : List[str] = 4
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : List[str] = 5
__UpperCAmelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowercase , eos_token_id=__lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
__UpperCAmelCase : int = ids_tensor((batch_size, 4) , vocab_size=20 )
__UpperCAmelCase : List[str] = 4
__UpperCAmelCase : Optional[int] = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : int = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : List[str] = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : Dict = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertFalse(jnp.isinf(__lowercase ).any() )
def UpperCAmelCase ( self : Optional[int] ) -> int:
__UpperCAmelCase : Tuple = 4
__UpperCAmelCase : Optional[int] = 10
__UpperCAmelCase : Union[str, Any] = 15
__UpperCAmelCase : Optional[int] = 2
__UpperCAmelCase : int = 1
__UpperCAmelCase : Optional[Any] = 15
# dummy input_ids and scores
__UpperCAmelCase : Dict = ids_tensor((batch_size, sequence_length) , __lowercase )
__UpperCAmelCase : Optional[Any] = input_ids.copy()
__UpperCAmelCase : str = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = scores.copy()
# instantiate all dist processors
__UpperCAmelCase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase : Optional[int] = FlaxTopKLogitsWarper(3 )
__UpperCAmelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCAmelCase : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowercase )
__UpperCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowercase )
__UpperCAmelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowercase , eos_token_id=__lowercase )
__UpperCAmelCase : str = 10
# no processor list
__UpperCAmelCase : Any = temp_dist_warp(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : int = top_k_warp(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : Any = top_p_warp(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : Any = min_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : Any = bos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : Dict = eos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
# with processor list
__UpperCAmelCase : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCAmelCase : Union[str, Any] = processor(__lowercase , __lowercase , cur_len=__lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase ( self : Dict ) -> int:
__UpperCAmelCase : str = 4
__UpperCAmelCase : Any = 10
__UpperCAmelCase : List[str] = 15
__UpperCAmelCase : int = 2
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : Union[str, Any] = 15
# dummy input_ids and scores
__UpperCAmelCase : int = ids_tensor((batch_size, sequence_length) , __lowercase )
__UpperCAmelCase : Optional[int] = input_ids.copy()
__UpperCAmelCase : Dict = self._get_uniform_logits(__lowercase , __lowercase )
__UpperCAmelCase : int = scores.copy()
# instantiate all dist processors
__UpperCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase : Tuple = FlaxTopKLogitsWarper(3 )
__UpperCAmelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCAmelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowercase )
__UpperCAmelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowercase )
__UpperCAmelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowercase , eos_token_id=__lowercase )
__UpperCAmelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(__lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : str ):
__UpperCAmelCase : List[str] = temp_dist_warp(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : str = top_k_warp(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : Any = top_p_warp(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : int = min_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : List[str] = bos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
__UpperCAmelCase : List[Any] = eos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
return scores
# with processor list
def run_processor_list(__lowercase : List[str] , __lowercase : Dict , __lowercase : Tuple ):
__UpperCAmelCase : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCAmelCase : str = processor(__lowercase , __lowercase , cur_len=__lowercase )
return scores
__UpperCAmelCase : List[Any] = jax.jit(__lowercase )
__UpperCAmelCase : Union[str, Any] = jax.jit(__lowercase )
__UpperCAmelCase : List[str] = jitted_run_no_processor_list(__lowercase , __lowercase , __lowercase )
__UpperCAmelCase : Dict = jitted_run_processor_list(__lowercase , __lowercase , __lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
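# Outside the tests, generation code composes these warpers the same way; a
# hand-rolled sketch using the classes imported above (input_ids, scores and
# cur_len as in the tests):
#
#   processor = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(50)]
#   )
#   scores = processor(input_ids, scores, cur_len=cur_len)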
| 114
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
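    # Example: a silicon p-n junction at T = 300 K with donor and acceptor
    # concentrations of 1e17 cm^-3 and intrinsic concentration 1.5e10 cm^-3:
    #
    #   builtin_voltage(1e17, 1e17, 1.5e10)  # -> roughly 0.81 (volts)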
| 114
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
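# Usage sketch (downloads the pretrained tokenizer files on first call):
#
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   ids = tokenizer("Hello world")["input_ids"]
#
# Note the XLNet layout produced above: <sep> and <cls> come last, and a
# single sequence gets token type ids [0] * len(tokens + sep) + [2].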
| 209
|
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, indicating vertices not yet visited
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
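# Example (the 5-vertex graph below contains the Hamiltonian cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0, which the backtracking search finds):
#
#   graph = [
#       [0, 1, 0, 1, 0],
#       [1, 0, 1, 1, 1],
#       [0, 1, 0, 0, 1],
#       [1, 1, 0, 0, 1],
#       [0, 1, 1, 1, 0],
#   ]
#   hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]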
| 209
| 1
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2,
                 num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
def A_ ( self : Union[str, Any] ) -> int:
lowerCamelCase__ : int = LiltModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A_ ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
def A_ ( self : int ) -> Any:
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : Dict ) -> List[Any]:
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Tuple:
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
@slow
def A_ ( self : List[str] ) -> Tuple:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[int] = LiltModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
lowerCamelCase__ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(UpperCAmelCase )
lowerCamelCase__ : List[Any] = torch.tensor([[1, 2]] , device=UpperCAmelCase )
lowerCamelCase__ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase )
lowerCamelCase__ : List[str] = torch.Size([1, 2, 768] )
lowerCamelCase__ : Any = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=UpperCAmelCase , )
        self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase , atol=1e-3 ) )
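# A minimal standalone sketch of driving the same checkpoint outside the test harness.
# Illustrative only: the 4-value boxes are made-up (x0, y0, x1, y1) token coordinates,
# not real layout annotations.
#
#   model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#   input_ids = torch.tensor([[1, 2]])
#   bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one box per token
#   last_hidden = model(input_ids=input_ids, bbox=bbox).last_hidden_state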
| 50
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because it should only be run when releasing a minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=SCREAMING_SNAKE_CASE__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase : Any = {
'enabled': True,
'processes_per_host': 8,
}
__lowerCamelCase : List[Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowerCamelCase : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowerCamelCase : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=SCREAMING_SNAKE_CASE__ ,instance_type=self.instance_type ,debugger_hook_config=SCREAMING_SNAKE_CASE__ ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} ,metric_definitions=self.env.metric_definitions ,distribution=SCREAMING_SNAKE_CASE__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)])
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
# create estimator
__lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
__lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,SCREAMING_SNAKE_CASE__)
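# For reference, a hand-written sketch of launching a comparable estimator outside this
# test class, reusing the smp_options/mpi_options dicts built in create_estimator above.
# The role and version are placeholders/assumptions, not values taken from this file:
#
#   huggingface_estimator = HuggingFace(
#       entry_point="run_glue.py",
#       instance_type="ml.p3dn.24xlarge",
#       instance_count=1,
#       role="<sagemaker-execution-role>",  # placeholder
#       py_version="py36",
#       distribution={"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options},
#   )
#   huggingface_estimator.fit()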
| 73
| 0
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks) -> bool:
    """Return True if the regexes in qs match some window of the strings in ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # start every leaf as unmatched, then apply the first matching rule
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
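# Usage sketch (assumes a GPT-J-style Flax parameter tree; names are illustrative):
#
#   from flax.core.frozen_dict import unfreeze
#   from transformers import FlaxGPTJForCausalLM
#
#   model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#   param_spec = set_partitions(unfreeze(model.params))
#   # `param_spec` mirrors the params PyTree, mapping each weight to a
#   # PartitionSpec (or None for replicated leaves) for pjit-style sharding.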
| 129
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term, power) -> list[str]:
    """Return the P-series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
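# Example:
#   p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']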
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 129
| 1
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __A( unittest.TestCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=4 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_attention_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_choices
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_attention_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=SCREAMING_SNAKE_CASE_ , )
return config, input_ids, attention_mask
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __A( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase_ (self ):
UpperCamelCase__ = FlaxDistilBertModelTester(self )
@slow
def UpperCAmelCase_ (self ):
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained("""distilbert-base-uncased""" )
UpperCamelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_flax
class __A( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
UpperCamelCase__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
UpperCamelCase__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase__ = (1, 11, 7_68)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 244
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __A( nn.Module ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = None , ):
super().__init__()
UpperCamelCase__ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , sample_size=SCREAMING_SNAKE_CASE_ , num_vector_embeds=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , num_embeds_ada_norm=SCREAMING_SNAKE_CASE_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCamelCase__ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCamelCase__ = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCamelCase__ = [1, 0]
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ):
UpperCamelCase__ = hidden_states
UpperCamelCase__ = []
UpperCamelCase__ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCamelCase__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCamelCase__ = self.transformer_index_for_condition[i]
UpperCamelCase__ = self.transformers[transformer_index](
SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCamelCase__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCamelCase__ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=SCREAMING_SNAKE_CASE_ )
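# Pipelines drive this module by setting the mixing knobs directly, e.g. (illustrative
# values; 77 and 257 above are commonly the CLIP text and image token counts used by
# dual-guided diffusion pipelines, stated here as an assumption):
#
#   dual_transformer.mix_ratio = 0.7                      # weight of the first condition's residual
#   dual_transformer.condition_lengths = [77, 257]
#   dual_transformer.transformer_index_for_condition = [1, 0]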
| 244
| 1
|
'''simple docstring'''
def _A ( _lowerCAmelCase = 50 ):
"""simple docstring"""
__lowercase =[1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
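# Sanity check: solution(5) == 15, i.e. a row of length 5 admits fifteen tilings
# with unit squares plus tiles of length 2, 3 and 4 (the Project Euler 117 example).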
if __name__ == "__main__":
print(f"{solution() = }")
| 48
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """bart"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , _lowerCAmelCase : Any=5_0_2_6_5 , _lowerCAmelCase : Optional[Any]=1_0_2_4 , _lowerCAmelCase : List[Any]=1_2 , _lowerCAmelCase : Any=4_0_9_6 , _lowerCAmelCase : List[str]=1_6 , _lowerCAmelCase : List[Any]=1_2 , _lowerCAmelCase : Dict=4_0_9_6 , _lowerCAmelCase : Optional[Any]=1_6 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : str=1_0_2_4 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : str=2 , **_lowerCAmelCase : Optional[int] , ):
'''simple docstring'''
__lowercase =vocab_size
__lowercase =max_position_embeddings
__lowercase =d_model
__lowercase =encoder_ffn_dim
__lowercase =encoder_layers
__lowercase =encoder_attention_heads
__lowercase =decoder_ffn_dim
__lowercase =decoder_layers
__lowercase =decoder_attention_heads
__lowercase =dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =activation_function
__lowercase =init_std
__lowercase =encoder_layerdrop
__lowercase =decoder_layerdrop
__lowercase =classifier_dropout
__lowercase =use_cache
__lowercase =encoder_layers
__lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _lowerCAmelCase):
__lowercase =self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'The config can simply be saved and uploaded again to be fixed.')
class _UpperCamelCase ( A ):
'''simple docstring'''
@property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
__lowercase ={0: 'batch'}
__lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
__lowercase , __lowercase =self.num_layers
for i in range(_lowerCAmelCase):
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
])
return common_inputs
@property
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =super().outputs
else:
__lowercase =super(_lowerCAmelCase , self).outputs
if self.use_past:
__lowercase , __lowercase =self.num_layers
for i in range(_lowerCAmelCase):
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# Generate decoder inputs
__lowercase =seq_length if not self.use_past else 1
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
__lowercase ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__lowercase =dict(**_lowerCAmelCase , **_lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
__lowercase =common_inputs['decoder_input_ids'].shape[1]
__lowercase , __lowercase =self.num_attention_heads
__lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase =decoder_seq_length + 3
__lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase =torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase)] , dim=1)
__lowercase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase =self.num_layers
__lowercase =min(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =max(_lowerCAmelCase , _lowerCAmelCase) - min_num_layers
__lowercase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_lowerCAmelCase):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
))
# TODO: test this.
__lowercase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)))
return common_inputs
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase =seqlen + 2
__lowercase , __lowercase =self.num_layers
__lowercase , __lowercase =self.num_attention_heads
__lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase =common_inputs['attention_mask'].dtype
__lowercase =torch.cat(
[common_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1)
__lowercase =[
(torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(_lowerCAmelCase)
]
return common_inputs
def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase =tokenizer.num_special_tokens_to_add(_lowerCAmelCase)
__lowercase =compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase)
# Generate dummy inputs according to compute batch and sequence
__lowercase =[' '.join([tokenizer.unk_token]) * seq_length] * batch_size
__lowercase =dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase))
return common_inputs
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
elif self.task == "causal-lm":
__lowercase =self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
else:
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
return common_inputs
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any]):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
else:
__lowercase =super(_lowerCAmelCase , self)._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
| 48
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 125
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ : Union[str, Any] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
snake_case_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 125
| 1
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ = logging.getLogger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : Optional[Any] = 'masked_bert'
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=3_0522 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : str=1e-1_2 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[int]="topK" , __lowerCAmelCase : Dict="constant" , __lowerCAmelCase : List[Any]=0.0 , **__lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
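# Usage sketch, assuming the class above is the MaskedBertConfig from the
# movement-pruning example (the keyword values simply echo its signature defaults):
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)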
| 30
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_a = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_a = spec.loader.load_module()
_a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_a = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_a = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
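# Quick illustration of what the checkpoint regex extracts:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]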
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 209
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """yolos"""
def __init__( self , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=[5_1_2, 8_6_4] , __lowerCAmelCase=1_6 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=1_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = num_detection_tokens
lowerCamelCase__ = use_mid_position_embeddings
lowerCamelCase__ = auxiliary_loss
# Hungarian matcher
lowerCamelCase__ = class_cost
lowerCamelCase__ = bbox_cost
lowerCamelCase__ = giou_cost
# Loss coefficients
lowerCamelCase__ = bbox_loss_coefficient
lowerCamelCase__ = giou_loss_coefficient
lowerCamelCase__ = eos_coefficient
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1_2
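# Usage sketch, assuming the config class above is YolosConfig (model_type "yolos");
# the keyword values simply echo its signature defaults:
#
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)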
| 209
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Any = self.dummy_uncond_unet
snake_case : Tuple = KarrasVeScheduler()
snake_case : int = KarrasVePipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : List[Any] = pipe(num_inference_steps=2 , generator=UpperCamelCase__ , output_type="numpy" ).images
snake_case : Dict = torch.manual_seed(0 )
snake_case : Dict = pipe(num_inference_steps=2 , generator=UpperCamelCase__ , output_type="numpy" , return_dict=UpperCamelCase__ )[0]
snake_case : Tuple = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = "google/ncsnpp-celebahq-256"
snake_case : List[str] = UNetaDModel.from_pretrained(UpperCamelCase__ )
snake_case : Optional[Any] = KarrasVeScheduler()
snake_case : Optional[int] = KarrasVePipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Dict = torch.manual_seed(0 )
snake_case : Union[str, Any] = pipe(num_inference_steps=20 , generator=UpperCamelCase__ , output_type="numpy" ).images
snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case : Any = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
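# A standalone sketch mirroring the slow test above (class names follow the
# public diffusers API, not this file's aliases):
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler()).to("cuda")
#   image = pipe(num_inference_steps=20, generator=torch.manual_seed(0), output_type="numpy").images[0]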
| 363
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__snake_case = data_utils.TransfoXLTokenizer
__snake_case = data_utils.TransfoXLCorpus
__snake_case = data_utils
__snake_case = data_utils
def __lowerCAmelCase ( lowercase : Optional[int] , lowercase : int , lowercase : List[Any] , lowercase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowercase , "rb" ) as fp:
snake_case : int = pickle.load(lowercase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
snake_case : int = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
snake_case : str = corpus.vocab.__dict__
torch.save(lowercase , lowercase )
snake_case : str = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , lowercase )
snake_case : Dict = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(lowercase , lowercase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
snake_case : Union[str, Any] = os.path.abspath(lowercase )
snake_case : str = os.path.abspath(lowercase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
snake_case : int = TransfoXLConfig()
else:
snake_case : Optional[int] = TransfoXLConfig.from_json_file(lowercase )
print(F'Building PyTorch model from configuration: {config}' )
snake_case : str = TransfoXLLMHeadModel(lowercase )
snake_case : str = load_tf_weights_in_transfo_xl(lowercase , lowercase , lowercase )
# Save pytorch-model
snake_case : Union[str, Any] = os.path.join(lowercase , lowercase )
snake_case : Optional[Any] = os.path.join(lowercase , lowercase )
print(F'Save PyTorch model to {os.path.abspath(lowercase )}' )
torch.save(model.state_dict() , lowercase )
print(F'Save configuration file to {os.path.abspath(lowercase )}' )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__snake_case = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
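# Example invocation (a sketch only; the script filename and all paths below are hypothetical):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./transfo-xl-tf/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl-tf/config.json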
| 112
| 0
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" ,'''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" ,f"""{model_dir}/__init__.py""" ,)
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" ,f"""{model_dir}/configuration_{lowercase_model_name}.py""" ,)
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" ,f"""{model_dir}/modeling_{lowercase_model_name}.py""" ,)
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" ,f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" ,)
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ,f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" ,)
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ,f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" ,)
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ,f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" ,)
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ,f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" ,)
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" ,f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" ,)
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" ,f"""{model_dir}/tokenization_{lowercase_model_name}.py""" ,)
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" ,f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
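# Example invocation (a sketch; the JSON fixture path is hypothetical and its keys must match
# the cookiecutter template's expected context):
#
#   transformers-cli add-new-model --testing --testing_file ./add_new_model_config.json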
| 129
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the source data so that each inner list holds one attribute column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize every attribute column to [0, 1]; weight 0 rewards small values, weight 1 rewards large ones."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for each row into one final score."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of `source_data` by percentual proximity and append the score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
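# A minimal usage sketch (the sample rows and weights below are illustrative, not from the original):
#
#   vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   # weight 0: lower is better (e.g. price); weight 1: higher is better (e.g. comfort, year)
#   weights = [0, 1, 1]
#   print(procentual_proximity(vehicles, weights))
#   # each row now carries its aggregated score in [0, len(weights)] as the last element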
| 129
| 1
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from `array` (with repetition) that sum to `target`, via plain recursion."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as above, but memoized with a DP array to avoid recomputing subproblems."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up variant: dp_array[i] counts the ordered combinations summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
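# Worked check for the demo above: with array = [1, 2, 5] and target = 5, the ordered
# combinations are 1+1+1+1+1, 1+1+1+2 (4 orderings), 1+2+2 (3 orderings) and 5 itself,
# so all three implementations should print 9.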
| 281
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_cycle_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_cycle_diffusion_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 281
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
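# A minimal usage sketch (illustrative, not part of the original module):
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#   onnx_config = AlbertOnnxConfig.from_model_config(config)
#   print(onnx_config.inputs)  # OrderedDict of dynamic axes for input_ids / attention_mask / token_type_ids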
| 48
|
def harmonic_series(n_term: str) -> list:
    """Return the first `n_term` terms of the harmonic series as strings: '1', '1/2', '1/3', ..."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
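# Quick sanity check (derived directly from the code above):
#   harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']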
| 48
| 1
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map string representations back to the actual choice values (needed for Enum/Literal choices)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` that builds its arguments from the fields of one or more dataclasses."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
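# A minimal usage sketch (the dataclass below is illustrative, not part of the module):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class TrainingArgs:
#       learning_rate: float = 3e-4
#       batch_size: int = 8
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(
#       args=["--learning_rate", "1e-4", "--fp16", "true"]
#   )
#   assert training_args.batch_size == 8  # untouched fields keep their dataclass defaults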
| 358
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 339
| 0
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-mel filterbank features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
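# Hedged usage sketch (illustrative only; the extractor's class header sits above
# this excerpt, and the names below are hypothetical, not taken from this file):
#   extractor = MCTCTFeatureExtractor(sampling_rate=16000)  # name assumed
#   waveform = np.random.randn(16000).astype(np.float32)    # synthetic audio
#   batch = extractor(waveform, sampling_rate=16000)
#   print(batch["input_features"][0].shape)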
| 30
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "upernet"
    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
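# Hedged usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    config = UperNetConfig()
    print(config.backbone_config.model_type)  # 'resnet' by default
    print(config.to_dict()["model_type"])  # 'upernet'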
| 30
| 1
|
from math import factorial
def solution(n: int = 100) -> int:
    '''simple docstring'''
    return sum(int(x) for x in str(factorial(n)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
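    # Sanity check: the digit sum of 100! is 648 (Project Euler #20's published answer).
    assert solution(100) == 648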
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
lowerCamelCase__ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowerCamelCase__ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowerCamelCase__ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowerCamelCase__ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowerCamelCase__ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowerCamelCase__ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowerCamelCase__ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowerCamelCase__ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowerCamelCase__ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowerCamelCase__ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowerCamelCase__ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowerCamelCase__ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowerCamelCase__ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowerCamelCase__ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowerCamelCase__ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith('CompVis'):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='unet')
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 234
|
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 1_2.7_0,
'''T''': 9.0_6,
'''A''': 8.1_7,
'''O''': 7.5_1,
'''I''': 6.9_7,
'''N''': 6.7_5,
'''S''': 6.3_3,
'''H''': 6.0_9,
'''R''': 5.9_9,
'''D''': 4.2_5,
'''L''': 4.0_3,
'''C''': 2.7_8,
'''U''': 2.7_6,
'''M''': 2.4_1,
'''W''': 2.3_6,
'''F''': 2.2_3,
'''G''': 2.0_2,
'''Y''': 1.9_7,
'''P''': 1.9_3,
'''B''': 1.2_9,
'''V''': 0.9_8,
'''K''': 0.7_7,
'''J''': 0.1_5,
'''X''': 0.1_5,
'''Q''': 0.1_0,
'''Z''': 0.0_7,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
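    # Hedged example (the sentence is illustrative): ordinary English text
    # tends to score well on the ETAOIN match (the maximum score is 12).
    print(english_freq_match_score("The quick brown fox jumps over the lazy dog"))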
| 112
| 0
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """simple docstring"""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['answers']['text'])
    return qid_to_has_ans
def normalize_answer(s):
    """simple docstring"""
    def remove_articles(text: str):
        return ARTICLES_REGEX.sub(' ', text)
    def white_space_fix(text: str):
        return " ".join(text.split())
    def remove_punc(text: str):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text: str):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """simple docstring"""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """simple docstring"""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    """simple docstring"""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
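# Worked example for the F1 above (illustrative strings): gold "the cat sat" vs.
# pred "cat sat down" normalizes to tokens {cat, sat} vs. {cat, sat, down};
# precision = 2/3, recall = 2/2, so F1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8.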
def get_raw_scores(dataset, preds):
    """simple docstring"""
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """simple docstring"""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    """simple docstring"""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(f1_scores.values()) / total),
                ('total', total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('total', total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    """simple docstring"""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """simple docstring"""
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """simple docstring"""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    """simple docstring"""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_f1, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """simple docstring"""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """simple docstring"""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """simple docstring"""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_f1
    main_eval['best_f1_thresh'] = f1_thresh
def main():
    """simple docstring"""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 85
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph: dict, v: str, visited_forward: set, visited_backward: set, cst_fwd: dict, cst_bwd: dict, queue: PriorityQueue, parent: dict, shortest_distance: float | int) -> float | int:
    """simple docstring"""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """simple docstring"""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue = PriorityQueue()
    queue_backward: PriorityQueue = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
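    # Hedged usage sketch: in the sample graphs above, the cheapest E -> F route
    # is E -> G -> F with cost 2 + 1 = 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3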
| 85
| 1
|
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"{solution() = }")
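    # Sanity check: Project Euler #57's published answer for the first 1000 expansions.
    assert solution(1000) == 153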
| 281
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    '''simple docstring'''
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    '''simple docstring'''
    with open(dataset_path, encoding="utf_8") as f:
        f_csv = csv.reader(f)
        output = []
        next(f_csv)  # skip the first line
        for line in tqdm(f_csv):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
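# Hedged illustration of the layout built above (bracketed tokens are placeholders):
#   input_ids[i, k, :] = [start] story[:cap_length] [delim] cont_k[:cap_length] [clf]
#   mc_token_ids[i, k] marks the [clf] position used for multiple-choice scoring,
#   and lm_labels mirrors input_ids with -100 elsewhere so the LM loss skips padding.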
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_snake_case , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=_snake_case , default="" )
parser.add_argument("--eval_dataset" , type=_snake_case , default="" )
parser.add_argument("--seed" , type=_snake_case , default=42 )
parser.add_argument("--num_train_epochs" , type=_snake_case , default=3 )
parser.add_argument("--train_batch_size" , type=_snake_case , default=8 )
parser.add_argument("--eval_batch_size" , type=_snake_case , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=_snake_case , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=_snake_case , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=_snake_case , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=_snake_case , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=_snake_case , default=6.25E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=_snake_case , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=_snake_case , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=_snake_case , default=0.01 )
parser.add_argument("--lm_coef" , type=_snake_case , default=0.9 )
parser.add_argument("--n_valid" , type=_snake_case , default=374 )
parser.add_argument("--server_ip" , type=_snake_case , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_snake_case , default="" , help="Can be used for distant debugging." )
__magic_name__ : List[Any] = parser.parse_args()
print(_snake_case )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__magic_name__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
__magic_name__ : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(_snake_case , _snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__magic_name__ : List[Any] = ["_start_", "_delimiter_", "_classify_"]
__magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_snake_case )
__magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(_snake_case )
__magic_name__ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_snake_case ) )
model.to(_snake_case )
# Load and encode the datasets
def tokenize_and_encode(_snake_case : str ):
if isinstance(_snake_case , _snake_case ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) )
elif isinstance(_snake_case , _snake_case ):
return obj
return [tokenize_and_encode(_snake_case ) for o in obj]
logger.info("Encoding dataset..." )
__magic_name__ : Optional[int] = load_rocstories_dataset(args.train_dataset )
__magic_name__ : str = load_rocstories_dataset(args.eval_dataset )
__magic_name__ : int = (train_dataset, eval_dataset)
__magic_name__ : List[str] = tokenize_and_encode(_snake_case )
# Compute the max input length for the Transformer
__magic_name__ : Optional[Any] = model.config.n_positions // 2 - 2
__magic_name__ : Optional[int] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__magic_name__ : List[str] = min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__magic_name__ : List[Any] = pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case )
__magic_name__ , __magic_name__ : Optional[int] = tensor_datasets[0], tensor_datasets[1]
__magic_name__ : Tuple = TensorDataset(*_snake_case )
__magic_name__ : Union[str, Any] = RandomSampler(_snake_case )
__magic_name__ : Dict = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size )
__magic_name__ : Any = TensorDataset(*_snake_case )
__magic_name__ : Optional[Any] = SequentialSampler(_snake_case )
__magic_name__ : int = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__magic_name__ : Tuple = args.max_steps
__magic_name__ : List[str] = args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1
else:
__magic_name__ : List[str] = len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
__magic_name__ : str = list(model.named_parameters() )
__magic_name__ : Dict = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
__magic_name__ : str = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
__magic_name__ : str = AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
__magic_name__ : List[str] = get_linear_schedule_with_warmup(
_snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case )
if args.do_train:
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
__magic_name__ : List[str] = 0
__magic_name__ : Tuple = 0
__magic_name__ : Dict = tqdm(_snake_case , desc="Training" )
for step, batch in enumerate(_snake_case ):
__magic_name__ : Optional[Any] = tuple(t.to(_snake_case ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = batch
__magic_name__ : Optional[Any] = model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
__magic_name__ : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__magic_name__ : List[str] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__magic_name__ : int = "Training loss: {:.2e} lr: {:.2e}".format(_snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__magic_name__ : Dict = model.module if hasattr(_snake_case , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__magic_name__ : List[Any] = os.path.join(args.output_dir , _snake_case )
__magic_name__ : Dict = os.path.join(args.output_dir , _snake_case )
torch.save(model_to_save.state_dict() , _snake_case )
model_to_save.config.to_json_file(_snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__magic_name__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_snake_case )
if args.do_eval:
model.eval()
__magic_name__ , __magic_name__ : Any = 0, 0
__magic_name__ , __magic_name__ : Union[str, Any] = 0, 0
for batch in tqdm(_snake_case , desc="Evaluating" ):
__magic_name__ : int = tuple(t.to(_snake_case ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = batch
with torch.no_grad():
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = model(
_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
__magic_name__ : Tuple = mc_logits.detach().cpu().numpy()
__magic_name__ : Any = mc_labels.to("cpu" ).numpy()
__magic_name__ : str = accuracy(_snake_case , _snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__magic_name__ : Tuple = eval_loss / nb_eval_steps
__magic_name__ : List[Any] = eval_accuracy / nb_eval_examples
__magic_name__ : int = tr_loss / nb_tr_steps if args.do_train else None
__magic_name__ : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
__magic_name__ : int = os.path.join(args.output_dir , "eval_results.txt" )
with open(_snake_case , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _snake_case , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 281
| 1
|
class Node:
    """simple docstring"""
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """simple docstring"""
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """simple docstring"""
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
def __a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
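    # Hedged usage sketch for the classes above:
    dll = LinkedList()
    for value in (1, 2, 3):
        dll.insert(value)
    print(dll)  # 1 2 3
    print(2 in dll)  # True
    dll.delete_value(2)
    print(dll)  # 1 3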
| 277
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
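# Hedged usage sketch (the checkpoint id is illustrative, not taken from this file):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")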
| 277
| 1
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
A = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(A)  # `A` is the README body assembled in the f-string above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 74
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339
| 0
|
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
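    # Hedged check: for binary inputs the gate agrees with Python's bitwise OR.
    assert all(or_gate(a, b) == (a | b) for a in (0, 1) for b in (0, 1))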
| 240
|
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
"""simple docstring"""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self) -> datasets.DatasetInfo:
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
def lowerCamelCase__ ( self : List[str] , snake_case : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__UpperCAmelCase : Any = self.config.features.arrow_schema.field(snake_case ).type
__UpperCAmelCase : Dict = pa_table.append_column(snake_case , pa.array([None] * len(snake_case ) , type=snake_case ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCAmelCase : Tuple = table_cast(snake_case , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self : Tuple , snake_case : Any ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCAmelCase : Optional[int] = json.load(snake_case )
# We keep only the field we are interested in
__UpperCAmelCase : int = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(snake_case , (list, tuple) ):
__UpperCAmelCase : Optional[Any] = set().union(*[row.keys() for row in dataset] )
__UpperCAmelCase : Union[str, Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
else:
__UpperCAmelCase : Optional[int] = dataset
__UpperCAmelCase : Tuple = pa.Table.from_pydict(snake_case )
yield file_idx, self._cast_table(snake_case )
# If the file has one json object per line
else:
with open(snake_case , '''rb''' ) as f:
__UpperCAmelCase : Optional[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__UpperCAmelCase : int = max(self.config.chunksize // 32 , 16 << 10 )
__UpperCAmelCase : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
__UpperCAmelCase : List[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__UpperCAmelCase : Union[str, Any] = batch.decode(self.config.encoding , errors=snake_case ).encode('''utf-8''' )
try:
while True:
try:
__UpperCAmelCase : List[str] = paj.read_json(
io.BytesIO(snake_case ) , read_options=paj.ReadOptions(block_size=snake_case ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case , pa.ArrowInvalid )
and "straddling" not in str(snake_case )
or block_size > len(snake_case )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(snake_case )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCAmelCase : Optional[Any] = json.load(snake_case )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case , snake_case ): # list is the only sequence type supported in JSON
try:
__UpperCAmelCase : Dict = set().union(*[row.keys() for row in dataset] )
__UpperCAmelCase : Optional[Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
__UpperCAmelCase : Union[str, Any] = pa.Table.from_pydict(snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(snake_case )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case )
batch_idx += 1
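

# Hedged usage sketch: this builder backs `datasets.load_dataset("json", ...)`.
# The file paths below are hypothetical placeholders.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("json", data_files="data.jsonl", split="train")  # JSON Lines input
    ds_nested = load_dataset("json", data_files="data.json", field="data", split="train")  # records under one field
    print(ds)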
| 240
| 1
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
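

# Worked examples for the helpers above (dataset name and path are hypothetical):
if __name__ == "__main__":
    assert camelcase_to_snakecase("MyDataset") == "my_dataset"
    assert filename_prefix_for_split("MyDataset", "train") == "my_dataset-train"
    print(filenames_for_dataset_split("/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 200]))
    # -> ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']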
| 290
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
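

# Hedged usage sketch (the default repo constant above is real; the agent name
# is a hypothetical placeholder):
if __name__ == "__main__":
    template = download_prompt(None, agent_name="my-agent", mode="run")
    print(template[:200])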
| 300
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str]=7 ,lowercase__ : Tuple=3 ,lowercase__ : Dict=1_8 ,lowercase__ : int=3_0 ,lowercase__ : Optional[int]=4_0_0 ,lowercase__ : int=True ,lowercase__ : List[Any]=None ,lowercase__ : Any=True ,lowercase__ : Optional[int]=None ,lowercase__ : str=True ,lowercase__ : List[Any]=[0.5, 0.5, 0.5] ,lowercase__ : List[Any]=[0.5, 0.5, 0.5] ,):
__lowercase = size if size is not None else {'''shortest_edge''': 1_8}
__lowercase = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
def SCREAMING_SNAKE_CASE ( self : Dict ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = LevitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ ,'''image_std''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_center_crop''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size ,{'''height''': 1_8, '''width''': 1_8} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
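

# Hedged usage sketch outside the test harness: "facebook/levit-128S" is a real
# Hub checkpoint; the image path is a hypothetical placeholder.
if __name__ == "__main__":
    processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
    pixel_values = processor(images=Image.open("cat.png"), return_tensors="pt").pixel_values
    print(pixel_values.shape)  # e.g. torch.Size([1, 3, 224, 224])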
| 356
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : List[str] ,**lowercase__ : Optional[Any] ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
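

# Migration sketch implied by the deprecation warning above (the checkpoint
# "vinvino02/glpn-kitti" is a real GLPN checkpoint on the Hub):
if __name__ == "__main__":
    processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")
    print(type(processor).__name__)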
| 52
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = "ZinengTang/tvlt-base"
snake_case_ = tempfile.mkdtemp()
def lowerCAmelCase__ ( self , **a__ ) -> Any:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **a__ )
def lowerCAmelCase__ ( self , **a__ ) -> Union[str, Any]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_feature_extractor()
snake_case_ = TvltProcessor(image_processor=a__ , feature_extractor=a__ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , a__ )
self.assertIsInstance(processor.image_processor , a__ )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_feature_extractor()
snake_case_ = TvltProcessor(image_processor=a__ , feature_extractor=a__ )
snake_case_ = np.ones([12_000] )
snake_case_ = feature_extractor(a__ , return_tensors="np" )
snake_case_ = processor(audio=a__ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_feature_extractor()
snake_case_ = TvltProcessor(image_processor=a__ , feature_extractor=a__ )
snake_case_ = np.ones([3, 224, 224] )
snake_case_ = image_processor(a__ , return_tensors="np" )
snake_case_ = processor(images=a__ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_feature_extractor()
snake_case_ = TvltProcessor(image_processor=a__ , feature_extractor=a__ )
snake_case_ = np.ones([12_000] )
snake_case_ = np.ones([3, 224, 224] )
snake_case_ = processor(audio=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_feature_extractor()
snake_case_ = TvltProcessor(image_processor=a__ , feature_extractor=a__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
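

# Hedged usage sketch mirroring the tests above ("ZinengTang/tvlt-base" is the
# checkpoint they reference; the arrays are dummy inputs):
if __name__ == "__main__":
    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    inputs = processor(audio=np.ones([12_000]), images=np.ones([3, 224, 224]))
    print(sorted(inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values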
| 85
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
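
    # Example invocation (script and output names are placeholders):
    #   python convert_txt2img_unclip_to_image_variation.py --dump_path ./unclip-image-variation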
| 85
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : int = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''ctrl'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] , lowercase_ : Optional[Any]=246534 , lowercase_ : Dict=256 , lowercase_ : int=1280 , lowercase_ : str=8192 , lowercase_ : Any=48 , lowercase_ : List[Any]=16 , lowercase_ : int=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[int]=1E-6 , lowercase_ : str=0.02 , lowercase_ : Optional[int]=True , **lowercase_ : Optional[int] , ):
lowercase_ : str = vocab_size
lowercase_ : Any = n_positions
lowercase_ : int = n_embd
lowercase_ : List[Any] = n_layer
lowercase_ : List[Any] = n_head
lowercase_ : Any = dff
lowercase_ : Optional[Any] = resid_pdrop
lowercase_ : int = embd_pdrop
lowercase_ : str = layer_norm_epsilon
lowercase_ : Any = initializer_range
lowercase_ : Union[str, Any] = use_cache
super().__init__(**lowercase_ )
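

# Hedged usage sketch: the class above corresponds to transformers.CTRLConfig;
# its defaults mirror the Salesforce "ctrl" checkpoint.
if __name__ == "__main__":
    from transformers import CTRLConfig

    config = CTRLConfig()
    print(config.n_layer, config.n_embd)  # 48 1280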
| 354
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21
| 0
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
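
    # Example invocation (file names are placeholders):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5.ckpt --original_config_file ./v1-inference.yaml --dump_path ./sd-diffusers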
| 264
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
NUM_SHARDS = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
_a : Union[str, Any] =os.path.join(_UpperCAmelCase ,"""tmp""" )
os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
_a : int =read_json(os.path.join(_UpperCAmelCase ,"""params.json""" ) )
_a : int =NUM_SHARDS[model_size]
_a : Dict =params["""n_layers"""]
_a : Union[str, Any] =params["""n_heads"""]
_a : List[str] =n_heads // num_shards
_a : int =params["""dim"""]
_a : Union[str, Any] =dim // n_heads
_a : int =1_0_0_0_0.0
_a : str =1.0 / (base ** (torch.arange(0 ,_UpperCAmelCase ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_a : str =params["""n_kv_heads"""] # for GQA / MQA
_a : Optional[Any] =n_heads_per_shard // num_key_value_heads
_a : Optional[int] =dim // num_key_value_heads
else: # compatibility with other checkpoints
_a : str =n_heads
_a : Any =n_heads_per_shard
_a : str =dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_a : Any =torch.load(os.path.join(_UpperCAmelCase ,"""consolidated.00.pth""" ) ,map_location="""cpu""" )
else:
# Sharded
_a : List[Any] =[
torch.load(os.path.join(_UpperCAmelCase ,F"consolidated.{i:02d}.pth" ) ,map_location="""cpu""" )
for i in range(_UpperCAmelCase )
]
_a : Any =0
_a : Optional[int] ={"""weight_map""": {}}
for layer_i in range(_UpperCAmelCase ):
_a : List[str] =F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_a : List[str] ={
F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wq.weight"] ),
F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wk.weight"] ),
F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_a : Tuple ={
F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
F"layers.{layer_i}.attention_norm.weight"
].clone(),
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
F"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
_a : str =permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
for i in range(_UpperCAmelCase )
] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Tuple =permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
for i in range(_UpperCAmelCase )
] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
_a : Any =torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
for i in range(_UpperCAmelCase )
] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[str] =torch.cat(
[loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(_UpperCAmelCase )] ,dim=1 )
_a : Union[str, Any] =torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(_UpperCAmelCase )] ,dim=0 )
_a : Tuple =torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(_UpperCAmelCase )] ,dim=1 )
_a : Union[str, Any] =torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(_UpperCAmelCase )] ,dim=0 )
_a : str =inv_freq
for k, v in state_dict.items():
_a : Any =filename
param_count += v.numel()
torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Union[str, Any] =F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_a : List[str] ={
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_a : int ={
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_UpperCAmelCase )] ,dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_UpperCAmelCase )] ,dim=0 ),
}
for k, v in state_dict.items():
_a : Dict =filename
param_count += v.numel()
torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )
# Write configs
_a : Tuple ={"""total_size""": param_count * 2}
write_json(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,"""pytorch_model.bin.index.json""" ) )
_a : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_a : int =params["""multiple_of"""] if """multiple_of""" in params else 256
_a : List[Any] =LlamaConfig(
hidden_size=_UpperCAmelCase ,intermediate_size=compute_intermediate_size(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=_UpperCAmelCase ,)
config.save_pretrained(_UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
    _a : Any =LlamaForCausalLM.from_pretrained(_UpperCAmelCase ,torch_dtype=torch.float16 ,low_cpu_mem_usage=_UpperCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_UpperCAmelCase ,safe_serialization=_UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,)
parser.add_argument(
"""--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,)
parser.add_argument(
"""--output_dir""" ,help="""Location to write HF model and tokenizer""" ,)
parser.add_argument("""--safe_serialization""" ,type=_UpperCAmelCase ,help="""Whether or not to save using `safetensors`.""" )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
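
# Example invocation (directory names are placeholders):
#   python convert_llama_weights_to_hf.py --input_dir ./llama_weights --model_size 7B --output_dir ./llama-7b-hf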
| 276
| 0
|
__magic_name__ : Optional[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesized infix expression with single-digit operands
    using Dijkstra's two-stack algorithm.

    >>> dijkstras_two_stack_algorithm("(5 + 3)")
    8
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
__magic_name__ : Any = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
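
    # A second illustrative expression (added for clarity): the algorithm
    # requires full parenthesization and single-digit operands.
    equation_2 = "(3 + (2 * 5))"
    print(f"{equation_2} = {dijkstras_two_stack_algorithm(equation_2)}")  # 13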
| 367
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=99 , snake_case=0 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=2 , snake_case=4 , snake_case="last" , snake_case=True , snake_case=None , snake_case=0 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_lengths
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = gelu_activation
snake_case_ = sinusoidal_embeddings
snake_case_ = causal
snake_case_ = asm
snake_case_ = n_langs
snake_case_ = vocab_size
snake_case_ = n_special
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = summary_type
snake_case_ = use_proj
snake_case_ = scope
snake_case_ = bos_token_id
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_input_lengths:
snake_case_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , 2 ).float()
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , lengths=snake_case , langs=snake_case )
snake_case_ = model(snake_case , langs=snake_case )
snake_case_ = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMWithLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForQuestionAnsweringSimple(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
snake_case_ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , )
snake_case_ = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , )
((snake_case_) , ) = result_with_labels.to_tuple()
snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
((snake_case_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = self.num_labels
snake_case_ = XLMForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = self.num_choices
snake_case_ = XLMForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self ):
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Tuple = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__SCREAMING_SNAKE_CASE : int = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a ( self , snake_case , snake_case , snake_case=False ):
snake_case_ = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def a ( self ):
snake_case_ = XLMModelTester(self )
snake_case_ = ConfigTester(self , config_class=snake_case , emb_dim=37 )
def a ( self ):
self.config_tester.run_common_tests()
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_attentions in attentions] , [True] * len(snake_case ) )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case ):
# adds PAD dummy token
snake_case_ = min_length + idx + 1
snake_case_ = min_length + idx + 1
snake_case_ = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case ) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_hidden_states in hidden_states] , [True] * len(snake_case ) , )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case ):
# adds PAD dummy token
snake_case_ = min_length + idx + 1
snake_case_ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case ) , )
pass
@slow
def a ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = XLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
snake_case_ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(snake_case )
snake_case_ = torch.tensor([[14, 447]] , dtype=torch.long , device=snake_case ) # the president
snake_case_ = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case_ = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case )
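

# Hedged usage sketch mirroring the slow generation test above
# ("xlm-mlm-en-2048" is the checkpoint it loads; greedy decoding is degenerate
# for this model, as the TODO in the test notes):
if __name__ == "__main__":
    from transformers import XLMTokenizer

    tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
    input_ids = torch.tensor([[14, 447]])  # "the president"
    output_ids = model.generate(input_ids, do_sample=False)
    print(tokenizer.decode(output_ids[0]))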
| 200
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowercase ( ):
a__ = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
a__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('RGB' )
return image
def __lowercase ( __lowerCAmelCase : str ):
a__ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
a__ = dct.pop(__lowerCAmelCase )
a__ = val
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a__ = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
a__ = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
a__ = torch.cat((q_bias, torch.zeros_like(__lowerCAmelCase , requires_grad=__lowerCAmelCase ), v_bias) )
a__ = qkv_bias
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
a__ = 3_6_4 if 'coco' in model_name else 2_2_4
a__ = BlipaVisionConfig(image_size=__lowerCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
a__ = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__lowerCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
a__ = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__lowerCAmelCase ).to_dict()
elif "t5-xl" in model_name:
a__ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a__ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
a__ = BlipaConfig(vision_config=__lowerCAmelCase , text_config=__lowerCAmelCase )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)
    # create processor; OPENAI_CLIP_MEAN and OPENAI_CLIP_STD are the CLIP
    # normalization constants brought in with the script's imports
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')
    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}')
        hf_model.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        'blip2-opt-2.7b',
        'blip2-opt-6.7b',
        'blip2-opt-2.7b-coco',
        'blip2-opt-6.7b-coco',
        'blip2-flan-t5-xl',
        'blip2-flan-t5-xl-coco',
        'blip2-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='blip2-opt-2.7b',
        choices=choices,
        type=str,
        help='Name of the model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
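

# Programmatic alternative to the CLI entry point above (illustrative sketch;
# the output folder is a hypothetical example, not part of the original script):
def example_convert() -> None:
    convert_blipa_checkpoint(
        model_name="blip2-opt-2.7b",
        pytorch_dump_folder_path="./blip2-opt-2.7b-hf",  # hypothetical path
        push_to_hub=False,
    )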
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type='numpy',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
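

# Illustrative usage sketch of the pipeline under test (not part of the suite);
# model id, prompt and token indices mirror the integration test above.
def example_attend_and_excite() -> None:
    pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
        'CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16).to('cuda')
    # token_indices picks out "elephant" (5) and "glasses" (7) in the tokenized prompt
    image = pipe(
        prompt='a painting of an elephant with glasses',
        token_indices=[5, 7],
        guidance_scale=7.5,
        generator=torch.manual_seed(0),
        num_inference_steps=50,
    ).images[0]
    image.save('elephant_glasses.png')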
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it, or False if the queue is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
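

# Quick usage sketch of the queue above (illustrative):
def _example_circular_queue() -> None:
    q = CircularQueue(3)
    q.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
    assert len(q) == 3
    assert q.first() == 10    # peek without removing
    assert q.dequeue() == 10  # FIFO order
    q.enqueue(40)             # rear wraps around the fixed-size array
    assert q.dequeue() == 20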
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a plain-text vocabulary file into an ordered dict mapping token -> line index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
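

# Quick self-check sketch for load_vocab (illustrative; the three-token
# vocabulary file below is a hypothetical example):
def _example_load_vocab() -> None:
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("[PAD]\n[UNK]\nhello\n")
    vocab = load_vocab(f.name)
    assert vocab == {"[PAD]": 0, "[UNK]": 1, "hello": 2}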
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = BitConfig(**backbone_config)
            elif not isinstance(backbone_config, PretrainedConfig):
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
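

# Minimal usage sketch (illustrative): the default configuration uses a plain
# ViT-style backbone, while is_hybrid=True auto-creates a BiT backbone config.
def _example_dpt_config() -> None:
    config = DPTConfig()
    assert config.backbone_config is None
    hybrid = DPTConfig(is_hybrid=True)
    assert hybrid.backbone_config.layer_type == "bottleneck"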
class DisjointSet:
    def __init__(self, set_counts):
        """Initialize with a list of initial set sizes, one entry per set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union by rank; returns True if the two sets were actually merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
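

# Quick usage sketch (illustrative): three initial sets of sizes 1, 1 and 2.
def _example_disjoint_set() -> None:
    ds = DisjointSet([1, 1, 2])
    assert ds.merge(0, 1) is True   # union succeeds
    assert ds.merge(0, 1) is False  # already in the same set
    ds.merge(1, 2)
    assert ds.max_set == 4          # all four elements now share one root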
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
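

# Minimal sketch (illustrative): the defaults reproduce the LXMERT-base layout
# of 9 language, 5 cross-modality and 5 visual (relationship) layers.
def _example_lxmert_config() -> None:
    config = LxmertConfig()
    assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}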
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a linked list from the elements of the given list and returns the head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the linked list's data in reverse order, using recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair,
            framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
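

# Minimal sketch (illustrative): inspect the export metadata declared by the
# ONNX config for a freshly constructed Longformer configuration.
def _example_onnx_config() -> None:
    config = LongformerConfig(attention_window=[256] * 12)
    onnx_config = LongformerOnnxConfig(config)
    assert "global_attention_mask" in onnx_config.inputs
    assert onnx_config.default_onnx_opset >= 14  # the tril operator needs opset 14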
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """
    Return the sum of an arithmetic progression: S = n/2 * (2a + (n - 1)d).

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
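

# Quick sanity check (illustrative): the closed-form S = n/2 * (2a + (n - 1)d)
# agrees with the brute-force sum 1 + 2 + ... + 10 = 55.
def _example_check() -> None:
    assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55.0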
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
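

# Illustrative round-trip sketch (not part of the original module): the 2x2 key
# [[2, 5], [1, 6]] has determinant 7, which is coprime with 36 as
# check_determinant requires; process_text pads "hello" to "HELLOO" first.
def example_round_trip() -> None:
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    encrypted = cipher.encrypt("hello")
    assert cipher.decrypt(encrypted) == "HELLOO"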
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
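

# Minimal sketch of find_executable_batch_size on its own (illustrative, not
# part of the original example): the decorated function is retried with the
# batch size halved whenever it raises an out-of-memory style error that the
# decorator recognizes.
def _example_find_executable_batch_size() -> None:
    @find_executable_batch_size(starting_batch_size=128)
    def probe(batch_size):
        if batch_size > 16:
            raise RuntimeError("CUDA out of memory.")  # simulated OOM
        return batch_size

    assert probe() == 16  # 128 -> 64 -> 32 -> 16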
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
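

# Illustrative usage sketch of the unconditional pipeline exercised above
# (not part of the test suite):
def _example_ldm_pipeline() -> None:
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipe(num_inference_steps=50).images[0]  # PIL image by default
    image.save("ldm_celebahq_sample.png")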
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned.")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model.")
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.")
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model).")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference.")

    return pt_model
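

# Minimal usage sketch (illustrative; the model class and file name are
# hypothetical examples): load Flax msgpack weights into a matching PyTorch model.
def _example_load_flax_checkpoint() -> None:
    from diffusers import UNet2DModel

    pt_model = UNet2DModel()  # must match the checkpoint's architecture
    load_flax_checkpoint_in_pytorch_model(pt_model, "diffusion_flax_model.msgpack")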
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast tokenizer for REALM, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts, padding every candidate to ``max_length``."""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
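# Hedged usage sketch of `batch_encode_candidates` (the checkpoint name is a
# real REALM checkpoint; the candidate strings are invented): every candidate
# is padded to `max_length`, so variable-count candidates stack cleanly into
# tensors of shape (batch, num_candidates, seq_len).
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    batch_text = [["Paris is the capital of France.", "Berlin is in Germany."]]
    encoded = tokenizer.batch_encode_candidates(batch_text, max_length=16, return_tensors="pt")
    print(encoded["input_ids"].shape)  # (1, 2, 16): batch x candidates x seq_len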
| 345
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
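# Hedged sketch of the lazy-import pattern used above (illustrative, not the
# transformers implementation): a module object that resolves a submodule only
# on first attribute access, so importing the package stays cheap.
#
# import importlib
# import types
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name: str, import_structure: dict):
#         super().__init__(name)
#         self._import_structure = import_structure
#
#     def __getattr__(self, attr: str):
#         for submodule, names in self._import_structure.items():
#             if attr in names:
#                 module = importlib.import_module(f"{self.__name__}.{submodule}")
#                 return getattr(module, attr)
#         raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")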
| 345
| 1
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Evaluate the logistic sigmoid element-wise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of the GELU activation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
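# Quick sketch (not part of the module proper): compare the 1.702-sigmoid
# approximation above against the exact GELU, x * Phi(x), with Phi the
# standard normal CDF computed via math.erf.
if __name__ == "__main__":
    import math

    def exact_gelu(vector: np.ndarray) -> np.ndarray:
        phi = np.vectorize(lambda x: 0.5 * (1.0 + math.erf(x / math.sqrt(2.0))))
        return vector * phi(vector)

    xs = np.linspace(-3, 3, 7)
    approx = gaussian_error_linear_unit(xs)
    print(np.max(np.abs(approx - exact_gelu(xs))))  # small, on the order of 1e-2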
| 336
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
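# Hedged sketch of the custom-timesteps API exercised by the tests above:
# instead of a uniform num_inference_steps grid, a strictly descending list of
# timesteps can be handed to the scheduler. The "model output" below is random
# noise standing in for a real denoiser.
if __name__ == "__main__":
    sketch_scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
    sketch_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be descending

    sketch_sample = torch.randn(1, 3, 8, 8)
    for t in sketch_scheduler.timesteps:
        fake_residual = torch.randn_like(sketch_sample)  # stand-in for model(sample, t)
        sketch_sample = sketch_scheduler.step(fake_residual, t, sketch_sample).prev_sample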
| 10
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a MRA model."""

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
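# Hedged usage sketch: instantiate a small configuration with the class above.
# The reduced sizes are arbitrary example values, not recommended settings.
if __name__ == "__main__":
    sketch_config = MraConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(sketch_config.approx_mode)  # "full" by default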
| 214
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
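# Hedged usage sketch of the zero-shot audio pipeline tested above. The audio
# here is synthetic noise, so the returned scores are meaningless; the 16 kHz
# sample rate is an assumption for the one-second example clip.
if __name__ == "__main__":
    import numpy as np

    sketch_classifier = pipeline(
        task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
    )
    sketch_audio = np.random.randn(16_000).astype(np.float32)
    print(sketch_classifier(sketch_audio, candidate_labels=["Sound of a dog", "Sound of rain"]))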
| 214
| 1
|
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the line joining the point to the origin-scaled normal
    normal_gradient = point_y / 4 / point_x
    # use the double-angle identities to compute the outgoing gradient
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F'{solution() = }')
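# Sanity-check sketch (not part of the solution): one bounce must stay on the
# ellipse 4x^2 + y^2 = 100, and the start point (1.4, -9.6) lies on it exactly
# since 4 * 1.96 + 92.16 = 100.
if __name__ == "__main__":
    x, y, _ = next_point(1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4))
    assert abs(4 * x * x + y * y - 100) < 1e-6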
| 198
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
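# Hedged sketch (not part of the original example): drive the training function
# directly, bypassing the CLI. Normally this script would be launched with
# `accelerate launch <this_script>.py --with_tracking` (script name assumed).
def _smoke_test():
    from argparse import Namespace

    sketch_args = Namespace(mixed_precision=None, cpu=True, with_tracking=False, project_dir="logs")
    sketch_config = {"lr": 2e-5, "num_epochs": 1, "seed": 42, "batch_size": 16}
    training_function(sketch_config, sketch_args)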
| 198
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=False)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text = "hello world"
                if self.space_between_special_tokens:
                    output_text = "[CLS] hello world [SEP]"
                else:
                    output_text = input_text
                encoded = tokenizer.encode(input_text, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output_text, output_text.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # The remaining common tests are overridden as no-ops: CANINE has no
    # conventional vocabulary and inputs cannot be pre-tokenized, so these
    # checks do not apply.
    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_prepare_seq2seq_batch(self):
        pass
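# Hedged sketch of the behaviour the tests above rely on: CANINE tokenizes at
# the character level, mapping each character to its Unicode code point and
# using private-use code points 0xE000 (57344) / 0xE001 (57345) as CLS / SEP,
# exactly as the expected ids in test_prepare_batch_integration show.
if __name__ == "__main__":
    sketch_tokenizer = CanineTokenizer()
    print(sketch_tokenizer("hi")["input_ids"])  # [57344, 104, 105, 57345]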
| 52
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
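    # Round-trip sketch using the helpers above (key and message are example
    # values): encrypting then decrypting with the same key is the identity,
    # and non-letters pass through unchanged without advancing the key.
    ciphertext = encrypt_message("LEMON", "Attack at dawn")
    assert ciphertext == "Lxfopv ef rnhr"
    assert decrypt_message("LEMON", ciphertext) == "Attack at dawn"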
| 52
| 1
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
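# Hedged sketch of the channel-rounding rule behind `make_divisible`, used by
# the model tester above to derive `last_hidden_size`. This is the standard
# MobileNet-style helper; transformers' exact version may differ in details.
from typing import Optional


def make_divisible_sketch(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go below 90% of the original value.
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


if __name__ == "__main__":
    print(make_divisible_sketch(512 * 0.25))  # 128: already a multiple of 8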
| 302
|
from __future__ import annotations
lowerCamelCase__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
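# Note: `queue.pop(0)` above costs O(n) per pop. For larger graphs the same loop
# is usually written with collections.deque, which pops from the left in O(1):
#
#     from collections import deque
#
#     queue = deque([self.source_vertex])
#     while queue:
#         vertex = queue.popleft()
#         ...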
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
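# Minimal usage sketch for the template above (the column names here are made
# up): map a dataset whose text/summary columns are named differently onto the
# canonical {"text", "summary"} schema.
if __name__ == "__main__":
    task = Summarization(text_column="article", summary_column="highlights")
    print(task.column_mapping)  # {'article': 'text', 'highlights': 'summary'}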
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_ ) , lowerCAmelCase_ )
return number - int(lowerCAmelCase_ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of candidate sets. Every candidate is padded to `max_length`
        so the batch can be converted to tensors of shape
        (batch_size, num_candidates, max_length).
        """
        # always pad to max_length so all candidate sequences line up
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
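# Minimal usage sketch (downloads a real checkpoint, so network access is
# assumed): each inner list holds the candidates for one question, and every
# candidate is padded to `max_length`, so the batch reshapes to
# (batch_size, num_candidates, max_length).
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    candidates = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
    print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])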
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""2D downsampling block with cross attention, as used in the Flax UNet."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block (resnets only, no attention)."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""2D upsampling block with cross attention; consumes skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block (resnets only); consumes skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Middle UNet block: alternating resnet and cross-attention layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
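# Minimal smoke-test sketch (assumes jax/flax and the diffusers resnet/attention
# modules above are importable): initialise a plain down block and push a dummy
# channels-last batch through it.
if __name__ == "__main__":
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=32)
    sample = jnp.zeros((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 32))
    params = block.init(jax.random.PRNGKey(0), sample, temb)
    hidden_states, output_states = block.apply(params, sample, temb)
    print(hidden_states.shape, len(output_states))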
"""simple docstring"""
_A = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
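# Minimal sketch of how a pin table like this is typically consumed (the helper
# name is hypothetical, not part of the file above): split off the package name
# and look up the installed version for comparison against the pinned spec.
def _lookup_installed_version(requirement: str) -> str:
    import importlib.metadata
    import re

    package_name = re.split(r"[<>=!~;]", requirement, maxsplit=1)[0].strip()
    return importlib.metadata.version(package_name)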
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
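# Minimal usage sketch (assumes `transformers` is installed): build a config
# from explicit backbone and decoder configs instead of the defaults.
if __name__ == "__main__":
    backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    decoder = DetrConfig()
    config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
    print(config.backbone_config.model_type, config.decoder_config.model_type)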
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "pytohn")
    2
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
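# The same distance for integers is one XOR away (a minimal companion sketch):
# x ^ y has a 1 bit exactly where x and y differ, so popcount gives the distance.
def hamming_distance_int(x: int, y: int) -> int:
    return bin(x ^ y).count("1")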
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from the input dir, build mosaic
    images and annotations, and save them to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """
    Generate a random lowercase alphanumeric string,
    e.g. '7b7ad245cdff75241935e4dd860f3bad'.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :List[str] = KandinskyInpaintPipeline
_UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCAmelCase :Dict = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCAmelCase :Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :int = False
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 100
@property
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCamelCase : Optional[int] = MultilingualCLIP(A_ )
UpperCamelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.dummy_text_encoder
UpperCamelCase : str = self.dummy_tokenizer
UpperCamelCase : List[Any] = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Optional[Any] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''simple docstring'''
UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase : List[Any] = Image.fromarray(np.uint8(A_ ) ).convert("RGB" ).resize((256, 256) )
# create mask
UpperCamelCase : str = np.ones((64, 64) , dtype=np.float32 )
UpperCamelCase : str = 0
if str(A_ ).startswith("mps" ):
UpperCamelCase : int = torch.manual_seed(A_ )
else:
UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Union[str, Any] = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = "cpu"
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : str = self.pipeline_class(**A_ )
UpperCamelCase : Tuple = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : List[Any] = output.images
UpperCamelCase : List[Any] = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : Union[str, Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __UpperCamelCase( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCamelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.float32 )
UpperCamelCase : str = 0
UpperCamelCase : List[Any] = "a hat"
UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCamelCase : Optional[Any] = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : Dict = pipeline(
A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
UpperCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(_A , '''hidden_sizes'''))
self.parent.assertTrue(hasattr(_A , '''num_attention_heads'''))
self.parent.assertTrue(hasattr(_A , '''num_encoder_blocks'''))
class SegformerModelTester:
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : List[str]=64 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[2, 2, 2, 2] , UpperCAmelCase__ : Tuple=[8, 4, 2, 1] , UpperCAmelCase__ : List[str]=[16, 32, 64, 128] , UpperCAmelCase__ : int=[1, 4, 8, 16] , UpperCAmelCase__ : List[str]=[1, 2, 4, 8] , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : int=None , ) ->List[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_encoder_blocks
A__ = sr_ratios
A__ = depths
A__ = hidden_sizes
A__ = downsampling_rates
A__ = num_attention_heads
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = num_labels
A__ = scope
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any]) ->int:
'''simple docstring'''
A__ = SegformerModel(config=_A)
model.to(_A)
model.eval()
A__ = model(_A)
A__ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int) ->str:
'''simple docstring'''
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(_A)
model.to(_A)
model.eval()
A__ = model(_A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
A__ = model(_A , labels=_A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str) ->List[Any]:
'''simple docstring'''
A__ = 1
A__ = SegformerForSemanticSegmentation(config=_A)
model.to(_A)
model.eval()
A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(_A)
A__ = model(_A , labels=_A)
self.parent.assertGreater(result.loss , 0.0)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCAmelCase__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
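# Worked example of the attention-shape arithmetic checked in test_attention_outputs above
# (informational only, using the tester defaults image_size=64, sr_ratios=[8, 4, 2, 1]):
#   first block:  queries cover (64 // 4) ** 2 = 256 patches, while keys/values are
#                 spatially reduced to (64 // (4 * 8)) ** 2 = 4 positions;
#   last block:   (64 // 32) ** 2 = 4 queries attend over (64 // (32 * 1)) ** 2 = 4 positions.
# This reflects SegFormer's efficient self-attention, which downsamples keys/values by sr_ratio.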
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Weighted directed graph used as a Markov chain transition model."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
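# Usage sketch (hypothetical two-node chain; the names and numbers are illustrative only):
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   counts = get_transitions("a", transitions, 5000)
# For these probabilities the stationary distribution satisfies pi_a = 5 * pi_b,
# so node "a" should be visited roughly five times as often as node "b" in `counts`.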
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
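# Usage note (sketch): with the `_LazyModule` pattern above, framework-specific modules are
# only imported when an attribute is first accessed, so e.g.
#   from transformers.models.encoder_decoder import EncoderDecoderConfig
# stays cheap, while touching `EncoderDecoderModel` triggers the torch-backed import.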
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
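# Example: 32 == 0b100000, so the loop shifts six times and the function returns 6;
# for 0 the loop never runs and the returned position is 0.
#   assert get_highest_set_bit_position(32) == 6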
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
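# Invocation sketch (assumed CLI usage; this module backs the `accelerate config` command,
# and each subparser registers a `func` default that `main()` dispatches to):
#   accelerate config           # interactive questionnaire, writes the config file
#   accelerate config default   # write a default config file non-interactively
#   accelerate config update    # rewrite an existing config file with current defaults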
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Square root via Newton's method: iterate x -> x - f(x) / f'(x) for f(x) = x**2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
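# Example: square_root_iterative(2) converges to ~1.41421356; each Newton step maps
# x -> x - (x**2 - a) / (2 * x), and the error shrinks quadratically near the root.
#   assert abs(square_root_iterative(2) - 2**0.5) < 1e-9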
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from the source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
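# Usage sketch for this (deprecated) reader, kept for reference:
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = datasets.train.next_batch(100)
# With the defaults above, `images` is a (100, 784) float32 array scaled to [0.0, 1.0]
# and `labels` is a (100, 10) one-hot array.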
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ : Dict = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : str = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowercase__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        # kept under the name the tests below use; returns the pretrained ByT5 tokenizer
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def __A ( self , a__ , a__=False , a__=20 , a__=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowerCAmelCase : Optional[int] = []
for i in range(len(a__ ) ):
try:
_lowerCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCAmelCase : Tuple = list(filter(lambda a__ : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , a__ ) )
_lowerCAmelCase : str = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCAmelCase : Optional[Any] = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCAmelCase : str = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : Any = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCAmelCase : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCAmelCase : int = """ """ + output_txt
_lowerCAmelCase : Dict = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
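    # How the expected ids above arise (informational): ByT5 tokenizes raw UTF-8 bytes,
    # shifted by 3 to make room for the special tokens pad=0, eos=1, unk=2. "U" is byte 85,
    # hence id 88; "€" is the three-byte sequence 0xE2 0x82 0xAC -> ids 229, 133, 175;
    # the trailing 1 is the </s> token appended by the tokenizer.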
def __A ( self ):
_lowerCAmelCase : Dict = self.ta_base_tokenizer
_lowerCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_lowerCAmelCase : str = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowerCAmelCase : List[str] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
self.assertIsInstance(a__ , a__ )
if FRAMEWORK != "jax":
_lowerCAmelCase : List[str] = list(batch.input_ids.numpy()[0] )
else:
_lowerCAmelCase : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __A ( self ):
_lowerCAmelCase : Tuple = self.ta_base_tokenizer
_lowerCAmelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowerCAmelCase : List[str] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , a__ )
self.assertIn("""attention_mask""" , a__ )
self.assertNotIn("""decoder_input_ids""" , a__ )
self.assertNotIn("""decoder_attention_mask""" , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.ta_base_tokenizer
_lowerCAmelCase : Any = [
"""Summary of the text.""",
"""Another summary.""",
]
_lowerCAmelCase : Any = tokenizer(
text_target=a__ , max_length=32 , padding="""max_length""" , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def __A ( self ):
_lowerCAmelCase : int = self.ta_base_tokenizer
_lowerCAmelCase : List[str] = ["""A long paragraph for summarization. </s>"""]
_lowerCAmelCase : List[str] = ["""Summary of the text. </s>"""]
# fmt: off
_lowerCAmelCase : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowerCAmelCase : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowerCAmelCase : Optional[Any] = tokenizer(a__ , text_target=a__ )
self.assertEqual(a__ , batch["""input_ids"""][0] )
self.assertEqual(a__ , batch["""labels"""][0] )
def __A ( self ):
# safety check on max_len default value so we are sure the test works
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = """ He is very happy, UNwant\u00E9d,running"""
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(a__ )
_lowerCAmelCase : Any = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
_lowerCAmelCase : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : List[str] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_lowerCAmelCase : Any = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_lowerCAmelCase : List[str] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCAmelCase : str = tokenizer.__class__.from_pretrained(a__ )
_lowerCAmelCase : List[str] = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCAmelCase : Any = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_lowerCAmelCase : List[Any] = json.load(a__ )
with open(os.path.join(a__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_lowerCAmelCase : Tuple = json.load(a__ )
_lowerCAmelCase : int = [F"<extra_id_{i}>" for i in range(125 )]
_lowerCAmelCase : List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_lowerCAmelCase : Tuple = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCAmelCase : str = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCAmelCase : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=a__ )]
_lowerCAmelCase : Dict = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def __A ( self ):
_lowerCAmelCase : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(a__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
_lowerCAmelCase : str = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Dict = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(
a__ , skip_special_tokens=a__ )
for attr in attributes_list:
setattr(a__ , attr + """_id""" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + """_id""" ) , a__ )
setattr(a__ , attr + """_id""" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + """_id""" ) , a__ )
setattr(a__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(a__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(a__ , """additional_special_tokens_ids""" ) , [] )
setattr(a__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(a__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(a__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor: log-mel filter-bank features with utterance-level CMVN."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Get mel-filter bank features using TorchAudio's Kaldi-compliant fbank."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # subtract the per-feature mean / divide by the per-feature std over valid frames
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
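# Minimal standalone sketch of the utterance-level CMVN applied above (illustrative only):
#   import numpy as np
#   feats = np.random.randn(120, 80).astype(np.float32)   # (frames, mel bins)
#   valid = 100                                           # number of unpadded frames
#   feats = (feats - feats[:valid].mean(axis=0)) / feats[:valid].std(axis=0)
#   feats[valid:] = 0.0                                   # padded frames reset to padding_value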
UpperCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_sas_model,
    qa_sas_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples
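# Note (informational): faiss `search` returns (distances, indices); with IndexFlatIP the
# scores are inner products, so the rows of `eli5_questions_reps.dat` are assumed to be
# embeddings comparable under max-inner-product search.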
def _a ( a :str , a :Any="wiki40b" , a :int="dense" , a :Union[str, Any]=10 ) -> List[str]:
if source == "none":
a , a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a , a = query_qa_dense_index(
a , a , a , a , a , a )
else:
a , a = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ = action_list.index(action_st)
UpperCAmelCase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ = show_type == "Show full text of passages"
else:
UpperCAmelCase__ = 3
UpperCAmelCase__ = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ = support_list[:10]
UpperCAmelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26
| 0
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
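

# Map original BLIP state-dict keys onto the Hugging Face BLIP module names.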
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 89
|
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
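

# The model test below reuses the shared ModelTesterMixin/PipelineTesterMixin suites and only
# overrides the checks that do not apply to a convolution-only architecture.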
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 231
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
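

# Convert a batch of [-1, 1] torch tensors (pt_to_pil) or [0, 1] numpy arrays (numpy_to_pil)
# into lists of PIL images.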
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
| 370
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
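

# Split a README into its leading `---`-delimited YAML block and the remaining markdown body.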
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 201
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
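
    # Dummy inputs must match the argument order of the model's forward(); the past key/value
    # tensors use BLOOM's (batch * n_head, head_dim, seq) / (batch * n_head, seq, head_dim) layout.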
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 46
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfilt-large": (
        "https://huggingface.co/Salesforce/blip-vqa-capfilt-large/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flickr": (
        "https://huggingface.co/Salesforce/blip-itm-base-flickr/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-large-flickr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flickr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self, vocab_size=30_524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3_072,
        projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512,
        hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, bos_token_id=30_522, eos_token_id=2, pad_token_id=0, sep_token_id=102,
        is_decoder=True, use_cache=True, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3_072, projection_dim=512, num_hidden_layers=12,
        num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5,
        attention_dropout=0.0, initializer_range=1e-10, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592,
        image_text_hidden_size=256, **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 271
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
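

# Iteratively zero out the least important heads until the (1 / LM loss) score drops below
# the masking threshold.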
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask head (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune head (remove heads weights) based on
    the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 271
| 1
|
'''simple docstring'''
from typing import Any
import numpy as np
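
# The Rayleigh quotient of a Hermitian matrix A and non-zero vector v is (v* A v) / (v* v),
# which is bounded by the extreme eigenvalues of A.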
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
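    # Extra check (illustrative values): for a Hermitian matrix, the Rayleigh
    # quotient of any nonzero vector lies between the extreme eigenvalues.
    a = np.array([[2, 0], [0, 5]])
    v = np.array([[1], [1]])
    r = rayleigh_quotient(a, v).item()  # (2 + 5) / 2 = 3.5
    assert min(np.linalg.eigvalsh(a)) <= r <= max(np.linalg.eigvalsh(a))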
| 35
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
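    # Usage sketch (transition table is illustrative): random-walk a small
    # two-state chain for 1000 steps and count how often each state is visited.
    transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "b", 0.6),
        ("b", "a", 0.4),
    ]
    print(get_transitions("a", transitions, 1000))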
| 289
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
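# Usage sketch: with the _LazyModule pattern above, importing the package stays
# cheap; the torch/tf/flax submodules are only materialized on first attribute
# access (class names taken from the import structure above):
#
#   from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel
#   config = RobertaPreLayerNormConfig()
#   model = RobertaPreLayerNormModel(config)  # requires torch to be installed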
| 352
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
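# Usage sketch (language codes are illustrative): encode_example flattens a
# dict of translations into parallel "language"/"translation" lists, sorted by
# language code.
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}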
| 91
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ShapEImgaImgPipeline
SCREAMING_SNAKE_CASE__ : List[str] = ["""image"""]
SCREAMING_SNAKE_CASE__ : int = ["""image"""]
SCREAMING_SNAKE_CASE__ : Dict = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE__ : Dict = False
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 8
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCAmelCase_ : List[Any] = CLIPVisionModel(_a )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = CLIPImageProcessor(
crop_size=224 , do_center_crop=_a , do_normalize=_a , do_resize=_a , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCAmelCase_ : Optional[int] = PriorTransformer(**_a )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase_ : int = ShapERenderer(**_a )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.dummy_prior
UpperCAmelCase_ : Tuple = self.dummy_image_encoder
UpperCAmelCase_ : str = self.dummy_image_processor
UpperCAmelCase_ : Any = self.dummy_renderer
UpperCAmelCase_ : str = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=_a , clip_sample=_a , clip_sample_range=1.0 , )
UpperCAmelCase_ : Union[str, Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
UpperCAmelCase_ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("mps" ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_a )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_a ).manual_seed(_a )
UpperCAmelCase_ : List[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = """cpu"""
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_a )
UpperCAmelCase_ : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
UpperCAmelCase_ : Tuple = output.images[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = torch_device == """cpu"""
UpperCAmelCase_ : Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_a , relax_max_difference=_a , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_a )
UpperCAmelCase_ : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : str = self.get_dummy_inputs(_a )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase_ : str = batch_size * [inputs[key]]
UpperCAmelCase_ : Union[str, Any] = pipe(**_a , num_images_per_prompt=_a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
UpperCAmelCase_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
UpperCAmelCase_ : List[Any] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
UpperCAmelCase_ : Union[str, Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : Tuple = torch.Generator(device=_a ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(
_a , generator=_a , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_a , _a )
| 61
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
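# Usage sketch (checkpoint name taken from the vocab map above): a sentence
# pair is framed as [CLS] A [SEP] B [SEP], with token_type_ids 0/1 marking the
# two segments per create_token_type_ids_from_sequences.
#
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   enc = tokenizer("first segment", "second segment")
#   print(enc["input_ids"], enc["token_type_ids"])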
| 26
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 121
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" so its priority is never looked up
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
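    # Worked example (expression is illustrative): "a*(b+c)" postfixes to
    # "abc+*"; reversed with swapped brackets, it prefixes to "*a+bc".
    print(infix_2_postfix("a*(b+c)"))  # -> abc+*
    print(infix_2_prefix("a*(b+c)"))  # -> *a+bc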
| 121
| 1
|
'''simple docstring'''
def dodecahedron_surface_area(edge: int) -> float:
    # surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive integer.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    # volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive integer.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
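    # Quick numeric check for a unit edge: surface area 3*sqrt(25 + 10*sqrt(5))
    # ~= 20.6457 and volume (15 + 7*sqrt(5))/4 ~= 7.6631.
    print(dodecahedron_surface_area(1))  # ~20.645728807067604
    print(dodecahedron_volume(1))  # ~7.663118960624632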
| 104
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 201
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
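# Usage sketch (backbone name is illustrative): wrap a timm model as a
# transformers backbone; requires `timm` to be installed.
#
#   from transformers import TimmBackbone, TimmBackboneConfig
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(-1,))
#   backbone = TimmBackbone(config)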
| 319
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : Union[str, Any] = batch_size
_a : Tuple = is_training
_a : List[str] = use_auxiliary_loss
_a : Optional[Any] = num_queries
_a : str = num_channels
_a : List[str] = min_size
_a : int = max_size
_a : Optional[int] = num_labels
_a : List[str] = hidden_dim
_a : int = hidden_dim
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : str = self.num_queries
_a : Union[str, Any] = self.num_labels
_a : Tuple = [1, 1, 1, 1]
_a : Dict = self.num_channels
_a : str = 64
_a : Tuple = 128
_a : Optional[Any] = self.hidden_dim
_a : Union[str, Any] = self.hidden_dim
_a : List[Any] = self.hidden_dim
return config
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : str = output.encoder_hidden_states
_a : Any = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[Any] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : int = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = MaskaFormerModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : Dict = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = (self.model_tester.min_size,) * 2
_a : Any = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : List[Any] = self.model_tester.get_config()
_a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : str = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(_a )
model.to(_a )
model.train()
_a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = True
_a : str = True
_a : List[str] = model_class(_a ).to(_a )
model.train()
_a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : int = self.default_image_processor
_a : Tuple = prepare_img()
_a : Any = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Any = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[int] = model(**_a )
# masks_queries_logits
_a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Optional[Any] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_a )
_a : str = [el.to(_a ) for el in inputs['mask_labels']]
_a : Dict = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[str] = model(**_a )
self.assertTrue(outputs.loss is not None )
| 271
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : str ,_a : str ,_a : Any ,_a : Any="<s>" ,_a : Dict="</s>" ,_a : int="</s>" ,_a : Union[str, Any]="<s>" ,_a : List[Any]="<unk>" ,_a : Optional[Any]="<pad>" ,_a : List[str]="<mask>" ,_a : Optional[Dict[str, Any]] = None ,**_a : int ,):
'''simple docstring'''
_a : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
_a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Optional[int] = vocab_file
_a : Union[str, Any] = monolingual_vocab_file
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : Union[str, Any] = {}
_a : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : int = cnt
cnt += 1
with open(_a ,'r' ,encoding='utf-8' ) as f:
for line in f.readlines():
_a : str = line.strip().split()[0]
_a : Tuple = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : List[str] = len(self.fairseq_tokens_to_ids )
_a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.__dict__.copy()
_a : str = None
_a : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : Tuple = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : List[str] = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Dict = [self.cls_token_id]
_a : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Tuple ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __lowercase ( self : Any ,_a : int ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __lowercase ( self : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
_a : str = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a ,'w' ,encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(_a )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 271
| 1
|
def perfect_cube(n: int) -> bool:
    # round the cube root before cubing back: n ** (1 / 3) is inexact in floats,
    # so comparing the raw value would wrongly reject cubes such as 27
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 365
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform):
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
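# Usage sketch (synthetic audio): featurize one second of a 440 Hz tone sampled
# at 16 kHz with the defaults above; yields a (1, num_frames, 80) feature batch.
#
#   fe = MCTCTFeatureExtractor()
#   wave = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
#   feats = fe(wave, sampling_rate=16_000, return_tensors="np")
#   print(feats["input_features"].shape)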
| 285
| 0
|
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
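    # Example: play through 15 starting at 1; the string ends with "FizzBuzz ".
    print(fizz_buzz(1, 15))
    # -> 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz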
| 44
|
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 91
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    """Splits fused attention projections into q/k/v and renames all remaining keys."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f'text_model.encoder.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
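# Illustrative sketch (not part of the original conversion script): the qkv and
# in_proj branches above slice one fused projection matrix of shape
# (3 * dim, dim) row-wise into separate q/k/v matrices. The tensor is dummy data.
def _demo_qkv_split(dim: int = 4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)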
def prepare_img():
    """Downloads the COCO cats image used to verify the converted model."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name='groupvit-gcc-yfcc', push_to_hub=False
):
    """Copies and reshapes the original GroupViT weights into our GroupViT structure."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, padding=True, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('Successfully saved processor and model to', pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        processor.push_to_hub(model_name, organization='nielsr')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
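# Example invocation (the paths below are placeholders, not real files):
# python convert_groupvit_checkpoint.py \
#     --checkpoint_path /path/to/groupvit_checkpoint.pth \
#     --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#     --model_name groupvit-gcc-yfcc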
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)


def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    """
    subreddit : subreddit to query
    limit : number of posts to fetch
    age : one of "new", "top", "hot"
    wanted_data : fetch only the listed fields for each post
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
    print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/informer-tourism-monthly': (
        'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration class for an Informer time-series transformer model."""

    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`'
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`'
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
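# Minimal usage sketch, not part of this module; the argument values are
# illustrative only. feature_size is derived from the lag features plus the
# static/time features counted by _number_of_features.
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
    print(config.feature_size, config._number_of_features)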
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase =logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    """
    Image processor that resizes (optionally enlarging the target by 1 / crop_pct),
    center-crops, rescales and normalizes images.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``; if ``crop_pct`` is given, the target size is first enlarged by 1 / crop_pct."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to the given height and width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale``."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
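# Usage sketch, not part of this module: run the full preprocessing pipeline on
# a random dummy image. The class name matches the definition above (restored
# under the assumption that this is the PoolFormer-style processor); the image
# content is made up.
if __name__ == "__main__":
    processor = PoolFormerImageProcessor()
    dummy_image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
    batch = processor.preprocess(dummy_image)
    print(batch["pixel_values"][0].shape)  # (3, 224, 224)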
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
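# Format sketch for the ref files read above (one JSON list per dataset row,
# holding the indices of sub-tokens that start inside a Chinese whole word);
# the sample content below is made up for illustration.
def _demo_ref_file_format():
    sample = "[2, 5]\n[]\n[1, 3]"
    refs = [json.loads(line) for line in sample.splitlines()]
    assert refs == [[2, 5], [], [1, 3]]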
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
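# Example invocation (illustrative; the model name, files and output path are
# placeholders):
# python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file train.txt \
#     --train_ref_file train_ref.txt \
#     --do_train \
#     --output_dir ./output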
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/realm-cc-news-pretrained-embedder': 512,
    'google/realm-cc-news-pretrained-encoder': 512,
    'google/realm-cc-news-pretrained-scorer': 512,
    'google/realm-cc-news-pretrained-openqa': 512,
    'google/realm-orqa-nq-openqa': 512,
    'google/realm-orqa-nq-reader': 512,
    'google/realm-orqa-wq-openqa': 512,
    'google/realm-orqa-wq-reader': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Constructs a "fast" REALM tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """
        Encode a batch of texts or text pairs. Every entry holds one example's
        candidate strings, which are padded to ``max_length`` so the candidate
        sets can be stacked into a single tensor.
        """
        # set batch padding to max length in the batch
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
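# Usage sketch for batch_encode_candidates, not part of this module: each outer
# list entry holds one question's candidate strings. The model name and texts
# below are illustrative only.
#
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["candidate one", "candidate two"]], max_length=10, return_tensors="pt"
# )
# print(batch["input_ids"].shape)  # (1, 2, 10)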
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
lowercase_ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
lowercase_ = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory returning the ConvertCommand for the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with argparse so it is available to the datasets-cli."""
        train_parser = parser.add_parser(
            'convert', help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.'
        )
        train_parser.add_argument(
            '--tfds_path',
            type=str,
            required=True,
            help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.',
        )
        train_parser.add_argument(
            '--datasets_directory', type=str, required=True, help='Path to the HuggingFace Datasets folder.'
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('datasets-cli/converting')

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}')

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file')
                continue

            with open(input_file, encoding='utf-8') as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger', 'get_logger')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '\n')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
                    out_line = 'from . import ' + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}')

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py', '')
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f'Adding directory {output_dir}')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, 'w', encoding='utf-8') as f:
                f.writelines(out_lines)
            self._logger.info(f'Converted in {output_file}')

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('.py', '')]
                self._logger.info(f'Moving {dest_folder} to {utils_file}')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.')

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )

        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
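    # Background sketch, not part of the original test: `replicate` copies the
    # params pytree to every local device, while `shard` reshapes a batch of
    # size (num_local_devices * n, ...) into (num_local_devices, n, ...), the
    # layout that a jit/pmap-compiled pipeline expects.
    def _demo_shard_layout(self):
        per_device = 2
        x = jnp.zeros((jax.local_device_count() * per_device, 3))
        assert shard(x).shape == (jax.local_device_count(), per_device, 3)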
| 285
| 0
|
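# A toy sketch of the replicate/shard data-parallel pattern used in the Flax test
# above: `replicate` copies the params to every device, `shard` splits the batch's
# leading axis across devices (so the batch must be divisible by jax.device_count()),
# and `jax.pmap` runs one shard per device.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4, 4))}
batch = jnp.ones((jax.device_count() * 2, 4))

p_params = replicate(params)     # adds a leading device axis to every leaf
sharded = shard(batch)           # (devices, per_device_batch, 4)

@jax.pmap
def apply(params, x):
    return x @ params["w"]

out = apply(p_params, sharded)   # shape: (devices, per_device_batch, 4)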
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351
|
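# The sample above defers heavy imports behind optional-dependency checks via
# transformers' `_LazyModule`. Below is a minimal sketch of the same idea using
# plain PEP 562 module-level __getattr__ (module/attribute names are hypothetical):
import importlib

_import_structure = {"tokenization_foo": ["FooTokenizer"]}
_attr_to_module = {a: m for m, attrs in _import_structure.items() for a in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)  # submodule imported only on first attribute access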
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase_ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 253
| 0
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
a_ = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
a_ = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
a_ = sorted(arg_to_scheduler.keys())
a_ = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class UpperCAmelCase_ ( pl.LightningModule ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_="base" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ) -> int:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase_ )
__lowercase : Union[str, Any] = 0
__lowercase : int = Path(self.hparams.output_dir )
__lowercase : List[str] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__lowercase : Any = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase_ , **UpperCamelCase_ , )
else:
__lowercase : PretrainedConfig = config
__lowercase : Optional[int] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(self.config , UpperCamelCase_ ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , UpperCamelCase_ , getattr(self.hparams , UpperCamelCase_ ) )
if tokenizer is None:
__lowercase : Tuple = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase_ , )
else:
__lowercase : PreTrainedTokenizer = tokenizer
__lowercase : List[Any] = MODEL_MODES[mode]
if model is None:
__lowercase : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase_ , )
else:
__lowercase : List[Any] = model
def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple:
__lowercase : List[Any] = self.model_type.from_pretrained(*UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
__lowercase : Optional[Any] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__lowercase : List[Any] = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def _lowerCamelCase ( self ) -> Any:
__lowercase : int = self.model
__lowercase : Optional[int] = ['''bias''', '''LayerNorm.weight''']
__lowercase : List[Any] = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
__lowercase : Tuple = Adafactor(
UpperCamelCase_ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase_ , relative_step=UpperCamelCase_ )
else:
__lowercase : Optional[Any] = AdamW(
UpperCamelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__lowercase : Optional[int] = optimizer
__lowercase : List[str] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
return self.validation_step(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
return self.validation_end(UpperCamelCase_ )
def _lowerCamelCase ( self ) -> int:
__lowercase : str = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__lowercase : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
if stage == "test":
__lowercase : Union[str, Any] = len(self.test_dataloader().dataset )
else:
__lowercase : Optional[int] = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=UpperCamelCase_ )
__lowercase : Dict = len(self.train_dataloader().dataset )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False ) -> Any:
raise NotImplementedError('''You must implement this for your task''' )
def _lowerCamelCase ( self ) -> int:
return self.train_loader
def _lowerCamelCase ( self ) -> Tuple:
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ )
def _lowerCamelCase ( self ) -> int:
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]:
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
UpperCamelCase_ , list(filter(UpperCamelCase_ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def _lowerCamelCase ( self , UpperCamelCase_ ) -> None:
__lowercase : Dict = self.output_dir.joinpath('''best_tfmr''' )
__lowercase : List[str] = self.step_count
self.model.save_pretrained(UpperCamelCase_ )
self.tokenizer.save_pretrained(UpperCamelCase_ )
@staticmethod
def _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
parser.add_argument(
'''--model_name_or_path''' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=UpperCamelCase_ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(UpperCamelCase_ ).parent / '''test_run''' / '''cache''' ) , type=UpperCamelCase_ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=UpperCamelCase_ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=UpperCamelCase_ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=UpperCamelCase_ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=UpperCamelCase_ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=UpperCamelCase_ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=UpperCamelCase_ , metavar=UpperCamelCase_ , type=UpperCamelCase_ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=UpperCamelCase_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=UpperCamelCase_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=UpperCamelCase_ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=UpperCamelCase_ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=UpperCamelCase_ )
parser.add_argument('''--train_batch_size''' , default=32 , type=UpperCamelCase_ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=UpperCamelCase_ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class UpperCAmelCase_ ( pl.Callback ):
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class UpperCAmelCase_ ( pl.Callback ):
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(UpperCamelCase_ )
class UpperCAmelCase_ ( pl.Callback ):
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
__lowercase : Dict = trainer.lr_schedulers[0]['''scheduler''']
__lowercase : int = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> str:
rank_zero_info('''***** Validation results *****''' )
__lowercase : Union[str, Any] = trainer.callback_metrics
# Log results
for key in sorted(UpperCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(UpperCamelCase_ , str(metrics[key] ) ) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
rank_zero_info('''***** Test results *****''' )
__lowercase : int = trainer.callback_metrics
# Log and save results to file
__lowercase : Optional[Any] = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(UpperCamelCase_ , '''w''' ) as writer:
for key in sorted(UpperCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(UpperCamelCase_ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(UpperCamelCase_ , str(metrics[key] ) ) )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(__UpperCamelCase ).parent / '''test_run''' / '''model_checkpoints''' ) , type=__UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__UpperCamelCase , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=__UpperCamelCase )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=__UpperCamelCase , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=__UpperCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=__UpperCamelCase , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(__UpperCamelCase ).parent / '''test_run''' / '''dummy-train-data''' ) , type=__UpperCamelCase , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[] , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
pl.seed_everything(args.seed )
# init model
__lowercase : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__UpperCamelCase )
# add custom checkpoints
if checkpoint_callback is None:
__lowercase : int = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__UpperCamelCase )
if logging_callback is None:
__lowercase : List[Any] = LoggingCallback()
__lowercase : Union[str, Any] = {}
if args.fpaa:
__lowercase : Optional[Any] = 16
if args.gpus > 1:
__lowercase : Optional[int] = '''auto'''
__lowercase : Optional[Any] = '''ddp'''
__lowercase : List[str] = args.accumulate_grad_batches
__lowercase : str = None
__lowercase : Optional[Any] = '''auto'''
__lowercase : Any = pl.Trainer.from_argparse_args(
__UpperCamelCase , weights_summary=__UpperCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__UpperCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **__UpperCamelCase , )
if args.do_train:
trainer.fit(__UpperCamelCase )
else:
print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
| 249
|
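# A self-contained PyTorch sketch (toy model, not tied to the LightningModule above)
# of the optimizer grouping it builds: parameters whose names contain "bias" or
# "LayerNorm.weight" are placed in a group with zero weight decay.
import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = Toy()
no_decay = ("bias", "LayerNorm.weight")
grouped_parameters = [
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5)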
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Dict = tempfile.mkdtemp()
__lowercase : Any = BlipImageProcessor()
__lowercase : Optional[int] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__lowercase : str = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
__lowercase : str = InstructBlipProcessor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).tokenizer
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).qformer_tokenizer
def _lowerCamelCase ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> Any:
__lowercase : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase : Any = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ) -> str:
__lowercase : Any = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__lowercase : List[str] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowercase : Dict = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
__lowercase : int = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Any:
__lowercase : Any = self.get_image_processor()
__lowercase : str = self.get_tokenizer()
__lowercase : Any = self.get_qformer_tokenizer()
__lowercase : List[str] = InstructBlipProcessor(
tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
__lowercase : int = self.prepare_image_inputs()
__lowercase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
__lowercase : Tuple = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ) -> str:
__lowercase : str = self.get_image_processor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[Any] = self.get_qformer_tokenizer()
__lowercase : List[str] = InstructBlipProcessor(
tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
__lowercase : Dict = '''lower newer'''
__lowercase : int = processor(text=UpperCamelCase_ )
__lowercase : List[str] = tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
__lowercase : Union[str, Any] = qformer_tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Union[str, Any] = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Optional[int] = self.get_qformer_tokenizer()
__lowercase : List[str] = InstructBlipProcessor(
tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
__lowercase : Optional[int] = '''lower newer'''
__lowercase : Any = self.prepare_image_inputs()
__lowercase : List[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def _lowerCamelCase ( self ) -> Dict:
__lowercase : Any = self.get_image_processor()
__lowercase : List[str] = self.get_tokenizer()
__lowercase : Any = self.get_qformer_tokenizer()
__lowercase : Tuple = InstructBlipProcessor(
tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
__lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : List[str] = processor.batch_decode(UpperCamelCase_ )
__lowercase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : List[str] = self.get_image_processor()
__lowercase : List[str] = self.get_tokenizer()
__lowercase : List[Any] = self.get_qformer_tokenizer()
__lowercase : Optional[Any] = InstructBlipProcessor(
tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
__lowercase : Any = '''lower newer'''
__lowercase : Union[str, Any] = self.prepare_image_inputs()
__lowercase : Union[str, Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 249
| 1
|
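# A tiny sketch (hypothetical helper, not the processor's real internals) of how
# the "qformer_"-prefixed keys checked in the tests above can be produced by
# merging a second tokenizer's encoding under a prefix.
def merge_encodings(main, secondary, prefix="qformer_"):
    merged = dict(main)
    for key, value in secondary.items():
        merged[prefix + key] = value
    return merged

main = {"input_ids": [[1, 2]], "attention_mask": [[1, 1]]}
qformer = {"input_ids": [[3, 4]], "attention_mask": [[1, 1]]}
assert set(merge_encodings(main, qformer)) == {
    "input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask"}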
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = get_activation("swish")
self.assertIsInstance(UpperCamelCase__ , nn.SiLU)
self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0)
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = get_activation("silu")
self.assertIsInstance(UpperCamelCase__ , nn.SiLU)
self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0)
def lowercase_ ( self : List[Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = get_activation("mish")
self.assertIsInstance(UpperCamelCase__ , nn.Mish)
self.assertEqual(act(torch.tensor(-2_0_0 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0)
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = get_activation("gelu")
self.assertIsInstance(UpperCamelCase__ , nn.GELU)
self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0)
| 108
|
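# A sketch of the string-to-module lookup that `get_activation` performs in the
# tests above (the registry shape here is an assumption, not diffusers' actual table).
import torch
from torch import nn

ACT2CLS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}

def get_activation_sketch(name: str) -> nn.Module:
    try:
        return ACT2CLS[name]()
    except KeyError:
        raise ValueError(f"Unsupported activation function: {name}")

act = get_activation_sketch("silu")
assert isinstance(act, nn.SiLU)
assert act(torch.tensor(0.0)).item() == 0.0  # SiLU(0) = 0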
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer
SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states"""
@property
def lowercase_ ( self : Dict)-> str:
'''simple docstring'''
__lowerCAmelCase: str = 4
__lowerCAmelCase: int = 8
__lowerCAmelCase: int = 7
__lowerCAmelCase: str = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: Any = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str=0)-> str:
'''simple docstring'''
torch.manual_seed(UpperCamelCase__)
__lowerCAmelCase: List[Any] = 4
__lowerCAmelCase: Dict = 8
__lowerCAmelCase: int = 7
__lowerCAmelCase: List[str] = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: Tuple = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowercase_ ( self : Dict)-> List[Any]:
'''simple docstring'''
return (4, 8)
@property
def lowercase_ ( self : Optional[int])-> int:
'''simple docstring'''
return (4, 8)
def lowercase_ ( self : Optional[int])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: str = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
__lowerCAmelCase: Any = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : List[Any])-> int:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=UpperCamelCase__)
self.assertIsNotNone(UpperCamelCase__)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(UpperCamelCase__)
__lowerCAmelCase: Dict = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
def lowercase_ ( self : List[str])-> Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.prepare_init_args_and_inputs_for_common()
__lowerCAmelCase: Tuple = self.model_class(**UpperCamelCase__)
__lowerCAmelCase: List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase: List[Any] = [*signature.parameters.keys()]
__lowerCAmelCase: Any = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
__lowerCAmelCase: Union[str, Any] = model.to(UpperCamelCase__)
if hasattr(UpperCamelCase__ , "set_default_attn_processor"):
model.set_default_attn_processor()
__lowerCAmelCase: str = self.get_dummy_seed_input()
with torch.no_grad():
__lowerCAmelCase: Dict = model(**UpperCamelCase__)[0]
__lowerCAmelCase: Dict = output[0, :5].flatten().cpu()
print(UpperCamelCase__)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowerCAmelCase: List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2))
@slow
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : int , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : int=7_7 , UpperCamelCase__ : Any=0)-> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(UpperCamelCase__)
__lowerCAmelCase: List[Any] = batch_size
__lowerCAmelCase: Any = embedding_dim
__lowerCAmelCase: Dict = num_embeddings
__lowerCAmelCase: Dict = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: str = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__)
__lowerCAmelCase: int = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self : List[Any])-> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
])
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior")
model.to(UpperCamelCase__)
__lowerCAmelCase: Dict = self.get_dummy_seed_input(seed=UpperCamelCase__)
with torch.no_grad():
__lowerCAmelCase: Optional[Any] = model(**UpperCamelCase__)[0]
assert list(sample.shape) == [1, 7_6_8]
__lowerCAmelCase: Dict = sample[0, :8].flatten().cpu()
print(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = torch.tensor(UpperCamelCase__)
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3)
| 108
| 1
|
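# A standalone sketch of the seeded-dummy-input pattern the regression tests above
# rely on: fixing torch's RNG before drawing inputs makes the compared output
# slices reproducible across runs.
import torch

def get_dummy_seed_input(seed=0, batch=1, dim=8):
    torch.manual_seed(seed)
    return torch.randn(batch, dim)

a = get_dummy_seed_input(seed=13)
b = get_dummy_seed_input(seed=13)
assert torch.equal(a, b)  # same seed, same tensor -> stable expected slices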
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__lowerCAmelCase : Optional[Any] =True
from torch.cuda.amp import autocast
__lowerCAmelCase : Dict =logging.getLogger(__name__)
def UpperCamelCase ( _lowerCamelCase : int=None , _lowerCamelCase : int=None ):
return field(default_factory=lambda: default , metadata=_lowerCamelCase )
@dataclass
class UpperCAmelCase :
__lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowercase = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__lowercase = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__lowercase = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
__lowercase = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
__lowercase = field(
default=0.1 , metadata={
"""help""": """The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
__lowercase = field(
default=0.1 , metadata={"""help""": """The dropout probability for all 1D convolutional layers in the feature extractor."""} , )
__lowercase = field(
default=0.0_5 , metadata={
"""help""": (
"""Probability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
__lowercase = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class UpperCAmelCase :
__lowercase = field(
default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__lowercase = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__lowercase = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__lowercase = field(
default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__lowercase = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowercase = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
__lowercase = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class UpperCAmelCase :
__lowercase = 42
__lowercase = True
__lowercase = None
__lowercase = None
__lowercase = None
__lowercase = None
def __call__( self :Any , lowercase_ :List[Dict[str, Union[List[int], torch.Tensor]]] )-> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
A__ = [{"input_values": feature["input_values"]} for feature in features]
A__ = [{"input_ids": feature["labels"]} for feature in features]
A__ = self.processor.pad(
lowercase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
A__ = self.processor.pad(
labels=lowercase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
A__ = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
A__ = labels
return batch
class UpperCAmelCase ( UpperCamelCase__ ):
def UpperCAmelCase_ ( self :Dict , lowercase_ :nn.Module , lowercase_ :Dict[str, Union[torch.Tensor, Any]] )-> torch.Tensor:
model.train()
A__ = self._prepare_inputs(lowercase_ )
if self.use_amp:
with autocast():
A__ = self.compute_loss(lowercase_ , lowercase_ )
else:
A__ = self.compute_loss(lowercase_ , lowercase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
A__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
A__ = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
A__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowercase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase_ )
else:
loss.backward()
return loss.detach()
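# A toy loop (independent of the trainer above) showing why the loss is divided by
# gradient_accumulation_steps: gradients add up in .grad across backward() calls,
# and the optimizer only steps every N micro-batches, emulating a larger batch.
import torch as _torch_sketch

_model = _torch_sketch.nn.Linear(4, 1)
_opt = _torch_sketch.optim.SGD(_model.parameters(), lr=0.1)
_accum_steps = 4
for _step in range(8):
    _x, _y = _torch_sketch.randn(2, 4), _torch_sketch.randn(2, 1)
    _loss = _torch_sketch.nn.functional.mse_loss(_model(_x), _y) / _accum_steps
    _loss.backward()                      # grads accumulate across micro-batches
    if (_step + 1) % _accum_steps == 0:
        _opt.step()
        _opt.zero_grad()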
def UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__, A__, A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__, A__, A__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
A__ = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
A__ = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
A__ = F"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(_lowerCamelCase : Optional[int] ):
A__ = re.sub(_lowerCamelCase , "" , batch["sentence"] ).lower() + " "
return batch
A__ = train_dataset.map(_lowerCamelCase , remove_columns=["sentence"] )
A__ = eval_dataset.map(_lowerCamelCase , remove_columns=["sentence"] )
def extract_all_chars(_lowerCamelCase : int ):
A__ = " ".join(batch["text"] )
A__ = list(set(_lowerCamelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
A__ = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , batch_size=-1 , keep_in_memory=_lowerCamelCase , remove_columns=train_dataset.column_names , )
A__ = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , batch_size=-1 , keep_in_memory=_lowerCamelCase , remove_columns=eval_dataset.column_names , )
A__ = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
A__ = {v: k for k, v in enumerate(_lowerCamelCase )}
A__ = vocab_dict[" "]
del vocab_dict[" "]
A__ = len(_lowerCamelCase )
A__ = len(_lowerCamelCase )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(_lowerCamelCase , _lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
A__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase )
A__ = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
A__ = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
A__ = min(len(_lowerCamelCase ) , data_args.max_train_samples )
A__ = train_dataset.select(range(_lowerCamelCase ) )
if data_args.max_val_samples is not None:
A__ = eval_dataset.select(range(data_args.max_val_samples ) )
A__ = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_lowerCamelCase : Union[str, Any] ):
A__, A__ = torchaudio.load(batch["path"] )
A__ = resampler(_lowerCamelCase ).squeeze().numpy()
A__ = 1_60_00
A__ = batch["text"]
return batch
A__ = train_dataset.map(
_lowerCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
A__ = eval_dataset.map(
_lowerCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_lowerCamelCase : Dict ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), F"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
A__ = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(_lowerCamelCase )
return batch
A__ = train_dataset.map(
_lowerCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , )
A__ = eval_dataset.map(
_lowerCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
A__ = datasets.load_metric("wer" )
def compute_metrics(_lowerCamelCase : Any ):
A__ = pred.predictions
A__ = np.argmax(_lowerCamelCase , axis=-1 )
A__ = processor.tokenizer.pad_token_id
A__ = processor.batch_decode(_lowerCamelCase )
# we do not want to group tokens when computing the metrics
A__ = processor.batch_decode(pred.label_ids , group_tokens=_lowerCamelCase )
A__ = wer_metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
A__ = DataCollatorCTCWithPadding(processor=_lowerCamelCase , padding=_lowerCamelCase )
# Initialize our Trainer
A__ = CTCTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , compute_metrics=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A__ = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
A__ = model_args.model_name_or_path
else:
A__ = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
A__ = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model()
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
A__ = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("train" , _lowerCamelCase )
trainer.save_metrics("train" , _lowerCamelCase )
trainer.save_state()
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_val_samples if data_args.max_val_samples is not None else len(_lowerCamelCase )
A__ = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("eval" , _lowerCamelCase )
trainer.save_metrics("eval" , _lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 237
|
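# A standalone sketch (toy tensors) of the collator's "-100" trick in the script
# above: padded label positions are replaced with -100 so the loss ignores them.
import torch

labels = torch.tensor([[5, 9, 0], [7, 0, 0]])        # 0 = pad token id
attention_mask = (labels != 0).long()
labels = labels.masked_fill(attention_mask.ne(1), -100)
print(labels)  # -> [[5, 9, -100], [7, -100, -100]]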
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """char"""
__lowercase = """bpe"""
__lowercase = """wp"""
__lowerCAmelCase : Union[str, Any] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = ["""image_processor""", """char_tokenizer"""]
__lowercase = """ViTImageProcessor"""
__lowercase = """MgpstrTokenizer"""
def __init__( self :int , lowercase_ :int=None , lowercase_ :List[str]=None , **lowercase_ :List[Any] )-> Optional[Any]:
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
A__ = kwargs.pop("feature_extractor" )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
A__ = tokenizer
A__ = AutoTokenizer.from_pretrained("gpt2" )
A__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowercase_ , lowercase_ )
def __call__( self :Optional[Any] , lowercase_ :Any=None , lowercase_ :Tuple=None , lowercase_ :List[str]=None , **lowercase_ :Union[str, Any] )-> Optional[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
A__ = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None:
A__ = self.char_tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
A__ = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self :List[str] , lowercase_ :int )-> int:
A__, A__, A__ = sequences
A__ = char_preds.size(0 )
A__, A__ = self._decode_helper(lowercase_ , "char" )
A__, A__ = self._decode_helper(lowercase_ , "bpe" )
A__, A__ = self._decode_helper(lowercase_ , "wp" )
A__ = []
A__ = []
for i in range(lowercase_ ):
A__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
A__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
A__ = scores.index(max(lowercase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A__ = {}
A__ = final_strs
A__ = final_scores
A__ = char_strs
A__ = bpe_strs
A__ = wp_strs
return out
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :str )-> Optional[Any]:
if format == DecodeType.CHARACTER:
A__ = self.char_decode
A__ = 1
A__ = "[s]"
elif format == DecodeType.BPE:
A__ = self.bpe_decode
A__ = 2
A__ = "#"
elif format == DecodeType.WORDPIECE:
A__ = self.wp_decode
A__ = 1_02
A__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
A__, A__ = [], []
A__ = pred_logits.size(0 )
A__ = pred_logits.size(1 )
A__, A__ = pred_logits.topk(1 , dim=-1 , largest=lowercase_ , sorted=lowercase_ )
A__ = preds_index.view(-1 , lowercase_ )[:, 1:]
A__ = decoder(lowercase_ )
A__, A__ = torch.nn.functional.softmax(lowercase_ , dim=2 ).max(dim=2 )
A__ = preds_max_prob[:, 1:]
for index in range(lowercase_ ):
A__ = preds_str[index].find(lowercase_ )
A__ = preds_str[index][:pred_eos]
A__ = preds_index[index].cpu().tolist()
A__ = pred_index.index(lowercase_ ) if eos_token in pred_index else -1
A__ = preds_max_prob[index][: pred_eos_index + 1]
A__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowercase_ )
conf_scores.append(lowercase_ )
return dec_strs, conf_scores
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] )-> int:
A__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowercase_ )]
return decode_strs
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[Any] )-> List[str]:
return self.bpe_tokenizer.batch_decode(lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :List[str] )-> Union[str, Any]:
A__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowercase_ )]
return decode_strs
| 237
| 1
|
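# A toy sketch of the head-selection step in the batch_decode above: for each
# sample, keep the decoded string whose head reported the highest confidence.
def pick_best(strs, scores):
    best = scores.index(max(scores))
    return strs[best], scores[best]

strs = ["char-out", "bpe-out", "wp-out"]
scores = [0.91, 0.87, 0.95]
assert pick_best(strs, scores) == ("wp-out", 0.95)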
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
return False
A_ : int = len(lowerCamelCase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowerCamelCase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :int = input('''Enter numbers separated by comma:\n''').strip()
lowerCamelCase :int = [int(item.strip()) for item in user_input.split(''',''')]
lowerCamelCase :Tuple = int(input('''Enter the number to be found in the list:\n''').strip())
lowerCamelCase :List[Any] = '''''' if binary_search(sequence, target) else '''not '''
print(F"{target} was {not_str}found in {sequence}")
| 135
|
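# An iterative variant (my sketch) of the recursive binary search above that avoids
# the O(n) list slicing by tracking low/high indices, keeping O(log n) time with
# O(1) extra space.
def binary_search_iterative(a_list, item):
    low, high = 0, len(a_list) - 1
    while low <= high:
        mid = (low + high) // 2
        if a_list[mid] == item:
            return True
        if item < a_list[mid]:
            high = mid - 1
        else:
            low = mid + 1
    return False

assert binary_search_iterative([1, 3, 5, 7], 5) is True
assert binary_search_iterative([1, 3, 5, 7], 4) is False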
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase :Tuple = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase=False , lowercase=True , lowercase=False , lowercase="<s>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<sep>" , lowercase="<pad>" , lowercase="<cls>" , lowercase="<mask>" , lowercase=["<eop>", "<eod>"] , lowercase = None , **lowercase , ):
A_ : Optional[int] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
A_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
A_ : List[str] = 3
A_ : List[str] = do_lower_case
A_ : Tuple = remove_space
A_ : Tuple = keep_accents
A_ : Tuple = vocab_file
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
A_ : Tuple = jieba
A_ : int = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _a (self ):
return len(self.sp_model )
def _a (self ):
A_ : str = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
A_ : Optional[Any] = self.__dict__.copy()
A_ : Tuple = None
return state
def __setstate__(self , lowercase ):
A_ : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ : List[Any] = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a (self , lowercase ):
if self.remove_space:
A_ : Union[str, Any] = """ """.join(inputs.strip().split() )
else:
A_ : Optional[int] = inputs
A_ : List[str] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A_ : str = unicodedata.normalize("""NFKD""" , lowercase )
A_ : Union[str, Any] = """""".join([c for c in outputs if not unicodedata.combining(lowercase )] )
if self.do_lower_case:
A_ : int = outputs.lower()
return outputs
def _a (self , lowercase ):
A_ : Optional[int] = self.preprocess_text(lowercase )
A_ : Dict = self.sp_model.encode(lowercase , out_type=lowercase )
A_ : List[Any] = []
for piece in pieces:
if len(lowercase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A_ : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A_ : Dict = cur_pieces[1:]
else:
A_ : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase )
else:
new_pieces.append(lowercase )
return new_pieces
def _a (self , lowercase ):
return self.sp_model.PieceToId(lowercase )
def _a (self , lowercase ):
return self.sp_model.IdToPiece(lowercase )
def _a (self , lowercase ):
A_ : int = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
return out_string
def _a (self , lowercase , lowercase = None ):
A_ : List[str] = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a (self , lowercase , lowercase = None , lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is not None:
return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1, 1]
return ([0] * len(lowercase )) + [1, 1]
def _a (self , lowercase , lowercase = None ):
A_ : str = [self.sep_token_id]
A_ : str = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a (self , lowercase , lowercase = None ):
if not os.path.isdir(lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Tuple = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
A_ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def _a (self , *lowercase , **lowercase ):
A_ : Any = super()._decode(*lowercase , **lowercase )
A_ : int = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 135
| 1
|
def binomial_coefficient( n: int , r: int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
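# Worked example: the call above prints 252, i.e. C(10, 5); the single row `c`
# plays the role of Pascal's triangle, updated in place from right to left so
# each entry can reuse the previous row's values.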
| 143
|
from __future__ import annotations
def fractional_knapsack( value: list[float] , weight: list[float] , capacity: float ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )

    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
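    # Minimal usage sketch (illustrative values, not part of the original
    # module): the value/weight ratios are 6, 5 and 4, so the greedy pass takes
    # items 0 and 1 whole and 2/3 of item 2, for a total value of 240.0.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))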
| 143
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width( height: int , width: int , scale_factor: int = 8 ) -> tuple[int, int]:
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
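# Worked example (illustrative): with the movq scale factor of 8 used by this
# pipeline, a 768x768 request maps back to 96x96, since 768 // 8**2 = 12 and
# 12 * 8 = 96:
#   downscale_height_and_width(768, 768, 8) == (96, 96)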
class __UpperCamelCase ( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ) -> torch.Tensor:
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ) -> None:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )

        device = torch.device(f"""cuda:{gpu_id}""" )

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ) -> None:
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )

        device = torch.device(f"""cuda:{gpu_id}""" )

        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ) -> torch.device:
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, tuple]:
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )

            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 79
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 79
| 1
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int ) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )


def solution() -> int:
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(f'{solution() = }')
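    # Worked example (illustrative): 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145
    # contributes to the sum; the only other such number is 40585, giving a
    # total of 40730.
    assert sum_of_digit_factorial(145 ) == 145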
| 49
|
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )

    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )

    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )

    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )

    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )

    return new_model_path
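

if __name__ == "__main__":
    # Minimal usage sketch (hypothetical file name; the module itself exposes
    # no CLI): folds duplicated initializer tensors and writes
    # `optimized_<name>` next to the input, returning the new path.
    if os.path.exists('model.onnx' ):
        print(remove_dup_initializers('model.onnx' ) )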
| 253
| 0
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
print('Loading config file...' )
def flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]="" , __SCREAMING_SNAKE_CASE : Union[str, Any]="." ):
lowercase_ : Optional[Any] = []
for k, v in d.items():
lowercase_ : str = parent_key + sep + k if parent_key else k
if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sep=__SCREAMING_SNAKE_CASE ).items() )
else:
items.append((new_key, v) )
return dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = argparse.Namespace()
with open(__SCREAMING_SNAKE_CASE , 'r' ) as yaml_file:
try:
lowercase_ : int = yaml.load(__SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader )
lowercase_ : List[str] = flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE )
for k, v in flat_cfg.items():
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE ) ) )
return config
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Optional[Any] = MobileViTVaConfig()
lowercase_ : Optional[int] = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowercase_ : int = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowercase_ : Optional[Any] = 3_84
else:
lowercase_ : Dict = 2_56
lowercase_ : List[Any] = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowercase_ : Optional[Any] = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowercase_ : Tuple = 3_84
else:
lowercase_ : List[Any] = 2_56
lowercase_ : Optional[int] = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowercase_ : Tuple = 1_51
lowercase_ : str = 5_12
lowercase_ : Optional[int] = 'ade20k-id2label.json'
lowercase_ : Optional[int] = True
elif task_name.startswith('voc_' ):
lowercase_ : Any = 21
lowercase_ : Optional[Any] = 5_12
lowercase_ : Dict = 'pascal-voc-id2label.json'
lowercase_ : Tuple = True
# orig_config
lowercase_ : List[str] = load_orig_config_file(__SCREAMING_SNAKE_CASE )
assert getattr(__SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
lowercase_ : int = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowercase_ : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowercase_ : List[str] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
lowercase_ : Optional[int] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
lowercase_ : int = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 )
lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
lowercase_ : List[str] = 'huggingface/label-files'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Any = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : int = idalabel
lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = val
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
if base_model:
lowercase_ : Dict = ''
else:
lowercase_ : Optional[int] = 'mobilevitv2.'
lowercase_ : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowercase_ : List[Any] = k[8:]
else:
lowercase_ : List[Any] = k
if ".block." in k:
lowercase_ : Optional[Any] = k_new.replace('.block.' , '.' )
if ".conv." in k:
lowercase_ : List[str] = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
lowercase_ : List[Any] = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
lowercase_ : str = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
lowercase_ : List[Any] = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowercase_ : Dict = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
lowercase_ : Any = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
lowercase_ : Union[str, Any] = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
lowercase_ : int = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
lowercase_ : Any = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowercase_ : Dict = [0, 1]
elif i == 4:
lowercase_ : Optional[Any] = [0, 1, 2, 3]
elif i == 5:
lowercase_ : List[str] = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
lowercase_ : int = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowercase_ : List[str] = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
lowercase_ : Optional[int] = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowercase_ : Any = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowercase_ : Optional[Any] = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
lowercase_ : Union[str, Any] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowercase_ : Optional[Any] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowercase_ : Dict = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
lowercase_ : str = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
lowercase_ : Optional[Any] = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
lowercase_ : List[str] = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
lowercase_ : Optional[Any] = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
lowercase_ : Optional[int] = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__SCREAMING_SNAKE_CASE )
for k in keys_to_ignore:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( ):
lowercase_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowercase_ : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : int = get_mobilevitva_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load original state_dict
lowercase_ : List[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowercase_ : str = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : Optional[Any] = False
else:
lowercase_ : Tuple = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : int = False
    # remove and rename some keys of the loaded original model
lowercase_ : Tuple = checkpoint
remove_unused_keys(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = create_rename_keys(__SCREAMING_SNAKE_CASE , base_model=__SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load modified state_dict
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase_ : List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase_ : Dict = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase_ : str = model(**__SCREAMING_SNAKE_CASE )
# verify classification model
if task_name.startswith('imagenet' ):
lowercase_ : int = outputs.logits
lowercase_ : Any = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowercase_ : Optional[int] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
        help=(
            "Name of the task on which the MobileViTV2 model you'd like to convert was trained. "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
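# Example invocation (script and file names are illustrative):
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256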
| 321
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ):
    """ solves the multi-process interleaved print problem """
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321
| 1
|
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be an integer label (a float for the stsb subset).
    references: list of references, one per prediction.
        Each reference should be an integer label (a float for the stsb subset).
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthews Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
        """simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 108
|
"""simple docstring"""
def equated_monthly_installments( principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    '''simple docstring'''
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
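    # Worked example (illustrative figures): 100000 borrowed at 12% per annum
    # over 10 years gives 120 monthly payments at a 1% monthly rate, i.e. an
    # EMI of roughly 1434.71.
    print(equated_monthly_installments(100000, 0.12, 10))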
| 108
| 1
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """simple docstring"""
    raise RuntimeError("CUDA out of memory." )
class ModelForTest(nn.Module ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
    '''simple docstring'''
    def test_memory_implicit( self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )

    def test_memory_explicit( self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs , arga = mock_training_loop_function("hello" )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, "hello"] )

    def test_start_zero( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )

    def test_approach_zero( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )

    def test_verbose_guard( self ):
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , "hello" , "world" )
        self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
        self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )

    def test_any_other_error( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError("Oops, we had an error!" )

        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!" , cm.exception.args[0] )

    @require_cuda
    def test_release_memory( self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
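# A minimal sketch of how the decorator is used outside these tests
# (hypothetical training function): on a CUDA OOM the wrapper halves
# `batch_size` and retries, starting from `starting_batch_size`.
#
# @find_executable_batch_size(starting_batch_size=128)
# def training_loop(batch_size):
#     ...  # train with `batch_size`; non-OOM errors propagate unchanged
#
# training_loop()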
| 356
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__( self , key: int = 0 ):
        self.__key = key

    def encrypt( self , content: str , key: int ):
        assert isinstance(content , str ) and isinstance(key , int )

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt( self , content: str , key: int ):
        assert isinstance(content , str ) and isinstance(key , int )

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string( self , content: str , key: int = 0 ):
        assert isinstance(content , str ) and isinstance(key , int )

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string( self , content: str , key: int = 0 ):
        assert isinstance(content , str ) and isinstance(key , int )

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file( self , file: str , key: int = 0 ):
        assert isinstance(file , str ) and isinstance(key , int )

        try:
            with open(file ) as fin, open("encrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False

        return True

    def decrypt_file( self , file: str , key: int ):
        assert isinstance(file , str ) and isinstance(key , int )

        try:
            with open(file ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
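# Round-trip sanity check (illustrative): XOR is its own inverse, so decrypting
# with the same key restores the plaintext.
if __name__ == "__main__":
    crypt = XORCipher()
    assert crypt.decrypt_string(crypt.encrypt_string("hallo welt" , 67 ) , 67 ) == "hallo welt"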
| 135
|
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers( remaining_length: int , remainder: int , digits: list[int] , length: int ) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result

    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result


def solution(max_power: int = 9 ) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
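# Worked example (illustrative): 36 + 63 = 99 and 409 + 904 = 1313 contain only
# odd digits, so 36, 63, 409 and 904 are all reversible; solution(3) counts the
# 120 reversible numbers below 1000, and the full solution(9) evaluates to
# 608720.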
| 135
| 1
|
"""simple docstring"""
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys( s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )

        print(f"""{key} -> {new_key}""" )

        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download( url , root='.' ):
    '''simple docstring'''
    # NOTE: the default `root` is an assumption so the single-argument call in
    # the conversion function below keeps working; pass an explicit cache
    # directory if you prefer.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('/' )[-2]
    download_target = os.path.join(root , filename )

    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )

    if os.path.isfile(download_target ):
        model_bytes = open(download_target , 'rb' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )

    with urllib.request.urlopen(url ) as source, open(download_target , 'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break

                output.write(buffer )
                loop.update(len(buffer ) )

    model_bytes = open(download_target , 'rb' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )

    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )

    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f""" but all the following weights are missing {missing}""" )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
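# Example invocation (script name and output path are illustrative; `tiny` is
# any key of the _MODELS table above, or pass a local .pt checkpoint path):
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny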
| 368
|
"""simple docstring"""
def one_pence() -> int:
    '''simple docstring'''
    return 1


def two_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()


def five_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )


def ten_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )


def twenty_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )


def fifty_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )


def one_pound(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )


def two_pound(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )


def solution(x: int = 200 ) -> int:
    '''simple docstring'''
    return two_pound(x )
if __name__ == "__main__":
print(solution(int(input().strip())))
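# Worked example (illustrative): solution(200) evaluates to 73682, the number
# of ways to make two pounds from any combination of the eight UK coin values
# handled above.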
| 205
| 0
|