code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 411
|
a__ : Tuple = "Tobias Carryer"
from time import time
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : str=int(time())) -> List[Any]: # noqa: B008
"""simple docstring"""
lowercase__ = multiplier
lowercase__ = increment
lowercase__ = modulo
lowercase__ = seed
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
lowercase__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
a__ : str = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 622
| 0
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase_ : Optional[Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
UpperCamelCase_ : List[Any] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
UpperCamelCase_ : Optional[Any] = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def lowerCamelCase__ ( self : Dict , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Dict=None , _snake_case : Tuple="uniform_average" , _snake_case : str=True ) -> List[str]:
"""simple docstring"""
A_ = mean_squared_error(
_snake_case , _snake_case , sample_weight=_snake_case , multioutput=_snake_case , squared=_snake_case )
return {"mse": mse}
| 482
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *_snake_case : int , **_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
pass
def A_ (__a ):
'''simple docstring'''
A_ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def A_ (__a ):
'''simple docstring'''
A_ = np.array(__a )
A_ = npimg.shape
return {"hash": hashimage(__a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : List[Any] , _snake_case : List[str] , _snake_case : Dict , _snake_case : int ) -> str:
"""simple docstring"""
A_ = MaskGenerationPipeline(model=_snake_case , image_processor=_snake_case )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : int , _snake_case : int , _snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
A_ = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
A_ = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
A_ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_snake_case ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A_ = "facebook/sam-vit-huge"
A_ = pipeline("mask-generation" , model=_snake_case )
A_ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
A_ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_snake_case ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
] , )
| 482
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def _UpperCamelCase ( _A ) -> list[int]:
"""simple docstring"""
if num <= 0:
_UpperCAmelCase = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(_A )
_UpperCAmelCase = [True] * (num + 1)
_UpperCAmelCase = []
_UpperCAmelCase = 2
_UpperCAmelCase = int(math.sqrt(_A ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_A )
# Set multiples of start be False
for i in range(start * start , num + 1 , _A ):
if sieve[i] is True:
_UpperCAmelCase = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(_A )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 555
|
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def _UpperCamelCase ( _A ) -> Callable:
"""simple docstring"""
@wraps(_A )
def _inner_fn(*_A , **_A ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , _A , )
return fn(*_A , **_A )
return _inner_fn
| 555
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
A = GPTaTokenizer
A = GPTaTokenizerFast
A = True
A = {'add_prefix_space': True}
A = False
def __a ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE ,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
SCREAMING_SNAKE_CASE : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : List[str] = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__SCREAMING_SNAKE_CASE ) )
def __a ( self ,**__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,**__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE : str = 'lower newer'
return input_text, output_text
def __a ( self ):
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[int] = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ,add_prefix_space=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) ,__SCREAMING_SNAKE_CASE )
def __a ( self ):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
# Testing tokenization
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ,add_prefix_space=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,add_prefix_space=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE ,add_prefix_space=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Testing the unknown token
SCREAMING_SNAKE_CASE : Dict = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) ,__SCREAMING_SNAKE_CASE )
def __a ( self ,*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __a ( self ,__SCREAMING_SNAKE_CASE=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
# Simple input
SCREAMING_SNAKE_CASE : Dict = 'This is a simple input'
SCREAMING_SNAKE_CASE : str = ['This is a simple input 1', 'This is a simple input 2']
SCREAMING_SNAKE_CASE : Union[str, Any] = ('This is a simple input', 'This is a pair')
SCREAMING_SNAKE_CASE : Optional[int] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding='max_length' )
# Simple input
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding='max_length' )
# Simple input
self.assertRaises(
__SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding='max_length' ,)
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding='max_length' )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding='max_length' )
# Pair input
self.assertRaises(
__SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding='max_length' ,)
def __a ( self ):
SCREAMING_SNAKE_CASE : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' )
# Simple input
SCREAMING_SNAKE_CASE : List[str] = 'This is a simple input'
SCREAMING_SNAKE_CASE : Optional[Any] = ['This is a simple input looooooooong', 'This is a simple input']
SCREAMING_SNAKE_CASE : Union[str, Any] = ('This is a simple input', 'This is a pair')
SCREAMING_SNAKE_CASE : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE ,padding='max_length' ,max_length=30 ,return_tensors='np' )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncate=__SCREAMING_SNAKE_CASE ,return_tensors='np' )
SCREAMING_SNAKE_CASE : str = tokenizer(*__SCREAMING_SNAKE_CASE ,padding='max_length' ,max_length=60 ,return_tensors='np' )
SCREAMING_SNAKE_CASE : List[str] = tokenizer(__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncate=__SCREAMING_SNAKE_CASE ,return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __a ( self ):
SCREAMING_SNAKE_CASE : int = '$$$'
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=__SCREAMING_SNAKE_CASE ,add_bos_token=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = 'This is a simple input'
SCREAMING_SNAKE_CASE : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
SCREAMING_SNAKE_CASE : Any = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] ,__SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,__SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __a ( self ):
pass
def __a ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE : Any = [self.get_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ,add_bos_token=__SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : Any = 'Encode this.'
SCREAMING_SNAKE_CASE : List[str] = 'This one too please.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode_plus(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,return_special_tokens_mask=__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE : Optional[int] = encoded_sequence_dict['input_ids']
SCREAMING_SNAKE_CASE : int = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,len(__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__SCREAMING_SNAKE_CASE )
]
SCREAMING_SNAKE_CASE : Dict = [x for x in filtered_sequence if x is not None]
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
@require_tokenizers
class _a ( unittest.TestCase ):
"""simple docstring"""
def __a ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = 'A photo of a cat'
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(
__SCREAMING_SNAKE_CASE ,)
self.assertEqual(__SCREAMING_SNAKE_CASE ,[2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('./test_opt' )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(
__SCREAMING_SNAKE_CASE ,)
self.assertEqual(__SCREAMING_SNAKE_CASE ,[2, 250, 1345, 9, 10, 4758] )
def __a ( self ):
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained('facebook/opt-350m' ,use_slow=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = 'A photo of a cat'
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
__SCREAMING_SNAKE_CASE ,)
# Same as above
self.assertEqual(__SCREAMING_SNAKE_CASE ,[2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def __a ( self ):
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = 'bos'
SCREAMING_SNAKE_CASE : Dict = tokenizer.get_vocab()['bos']
SCREAMING_SNAKE_CASE : Optional[Any] = 'A photo of a cat'
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
__SCREAMING_SNAKE_CASE ,)
# We changed the bos token
self.assertEqual(__SCREAMING_SNAKE_CASE ,[31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
__SCREAMING_SNAKE_CASE ,)
self.assertEqual(__SCREAMING_SNAKE_CASE ,[31957, 250, 1345, 9, 10, 4758] )
| 220
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('KT')
__UpperCAmelCase = TypeVar('VT')
class _a ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE = "root" ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Optional[Any] = key
SCREAMING_SNAKE_CASE : Optional[Any] = value
SCREAMING_SNAKE_CASE : list[Node[KT, VT]] = []
def __repr__( self ):
return f"""Node({self.key}: {self.value})"""
@property
def __a ( self ):
return len(self.forward )
class _a ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE = 0.5 ,__SCREAMING_SNAKE_CASE = 16 ):
SCREAMING_SNAKE_CASE : Node[KT, VT] = Node[KT, VT]()
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Optional[Any] = p
SCREAMING_SNAKE_CASE : Dict = max_level
def __str__( self ):
SCREAMING_SNAKE_CASE : Union[str, Any] = list(self )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return f"""SkipList(level={self.level})"""
SCREAMING_SNAKE_CASE : Optional[Any] = max((len(str(__SCREAMING_SNAKE_CASE ) ) for item in items) ,default=4 )
SCREAMING_SNAKE_CASE : List[str] = max(__SCREAMING_SNAKE_CASE ,4 ) + 4
SCREAMING_SNAKE_CASE : List[Any] = self.head
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(__SCREAMING_SNAKE_CASE ,'-' ) + '* ' * len(__SCREAMING_SNAKE_CASE ) )
lines.append(' ' * label_size + '| ' * len(__SCREAMING_SNAKE_CASE ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE : str = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(__SCREAMING_SNAKE_CASE ,'-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : Optional[Any] = node.forward
lines.append('None'.ljust(__SCREAMING_SNAKE_CASE ) + '* ' * len(__SCREAMING_SNAKE_CASE ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(__SCREAMING_SNAKE_CASE )
def __iter__( self ):
SCREAMING_SNAKE_CASE : Tuple = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE : Any = node.forward[0]
def __a ( self ):
SCREAMING_SNAKE_CASE : Dict = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __a ( self ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : str = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE : Dict = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __a ( self ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
for i, update_node in enumerate(__SCREAMING_SNAKE_CASE ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE : Dict = node.forward[i]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = update_node.forward[:i]
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 ,__SCREAMING_SNAKE_CASE ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE : str = level
SCREAMING_SNAKE_CASE : List[str] = Node(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : List[Any] = new_node
def __a ( self ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
return node.value
return None
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
SCREAMING_SNAKE_CASE : int = skip_list.head
SCREAMING_SNAKE_CASE : List[str] = {}
while node.level != 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = node.forward[0]
SCREAMING_SNAKE_CASE : List[str] = node.value
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
SCREAMING_SNAKE_CASE : List[str] = skip_list.head
SCREAMING_SNAKE_CASE : str = {}
while node.level != 0:
SCREAMING_SNAKE_CASE : int = node.forward[0]
SCREAMING_SNAKE_CASE : Any = node.value
if len(snake_case_ ) != 4:
print()
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def SCREAMING_SNAKE_CASE_ ( ) -> str:
SCREAMING_SNAKE_CASE : Tuple = SkipList()
assert skip_list.find('Some key' ) is None
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Dict = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
SCREAMING_SNAKE_CASE : int = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(snake_case_ : Optional[int] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(snake_case_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
def is_sorted(snake_case_ : int ):
return all(next_item >= item for item, next_item in zip(snake_case_ , lst[1:] ) )
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
for i in range(10 ):
skip_list.insert(snake_case_ , snake_case_ )
assert is_sorted(list(snake_case_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(snake_case_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(snake_case_ ) )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 220
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _snake_case ( __UpperCAmelCase ):
_A : Optional[int] = 'WhisperFeatureExtractor'
_A : Optional[Any] = 'WhisperTokenizer'
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str] ):
super().__init__(lowerCAmelCase_ ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE:int = self.feature_extractor
SCREAMING_SNAKE_CASE:Optional[int] = False
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=True ):
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase_ ,language=lowerCAmelCase_ ,no_timestamps=lowerCAmelCase_ )
def __call__( self : Any ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : List[str] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ ,**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE:List[str] = kwargs.pop("audio" ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE:Optional[Any] = kwargs.pop("sampling_rate" ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE:Optional[int] = kwargs.pop("text" ,lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE:List[Any] = args[0]
SCREAMING_SNAKE_CASE:List[Any] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
SCREAMING_SNAKE_CASE:str = self.feature_extractor(lowerCAmelCase_ ,*lowerCAmelCase_ ,sampling_rate=lowerCAmelCase_ ,**lowerCAmelCase_ )
if text is not None:
SCREAMING_SNAKE_CASE:Union[str, Any] = self.tokenizer(lowerCAmelCase_ ,**lowerCAmelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE:Tuple = encodings['input_ids']
return inputs
def __UpperCamelCase ( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Optional[int] ):
return self.tokenizer.batch_decode(*lowerCAmelCase_ ,**lowerCAmelCase_ )
def __UpperCamelCase ( self : List[Any] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Optional[int] ):
return self.tokenizer.decode(*lowerCAmelCase_ ,**lowerCAmelCase_ )
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="np" ):
return self.tokenizer.get_prompt_ids(lowerCAmelCase_ ,return_tensors=lowerCAmelCase_ )
| 143
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
lowerCAmelCase__ = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _lowerCAmelCase :
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _lowerCAmelCase :
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _lowerCAmelCase ( __UpperCAmelCase ):
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> jnp.ndarray:
for processor in self:
_SCREAMING_SNAKE_CASE : int = inspect.signature(processor.__call__ ).parameters
if len(lowerCAmelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
_SCREAMING_SNAKE_CASE : Any = processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ ) -> Any:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
_SCREAMING_SNAKE_CASE : Tuple = temperature
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : Optional[int] = scores / self.temperature
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = -float('Inf' ) , lowerCAmelCase_ = 1 ) -> int:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = top_p
_SCREAMING_SNAKE_CASE : Optional[int] = filter_value
_SCREAMING_SNAKE_CASE : Any = min_tokens_to_keep
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = lax.top_k(lowerCAmelCase_ , scores.shape[-1] )
_SCREAMING_SNAKE_CASE : Dict = jnp.full_like(lowerCAmelCase_ , self.filter_value )
_SCREAMING_SNAKE_CASE : int = jax.nn.softmax(lowerCAmelCase_ , axis=-1 ).cumsum(axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(lowerCAmelCase_ , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCAmelCase_ )
# min tokens to keep
_SCREAMING_SNAKE_CASE : Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Dict = jax.lax.sort_key_val(lowerCAmelCase_ , lowerCAmelCase_ )[-1]
return next_scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = -float('Inf' ) , lowerCAmelCase_ = 1 ) -> Tuple:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
_SCREAMING_SNAKE_CASE : Tuple = max(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = filter_value
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = scores.shape
_SCREAMING_SNAKE_CASE : int = jnp.full(batch_size * vocab_size , self.filter_value )
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = lax.top_k(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.broadcast_to((jnp.arange(lowerCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
_SCREAMING_SNAKE_CASE : List[Any] = topk_scores.flatten()
_SCREAMING_SNAKE_CASE : Dict = topk_indices.flatten() + shift
_SCREAMING_SNAKE_CASE : List[str] = next_scores_flat.at[topk_indices_flat].set(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : int = next_scores_flat.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
return next_scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.full(scores.shape , -float('inf' ) )
_SCREAMING_SNAKE_CASE : int = 1 - jnp.bool_(cur_len - 1 )
_SCREAMING_SNAKE_CASE : List[str] = jnp.where(lowerCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCAmelCase_ )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = max_length
_SCREAMING_SNAKE_CASE : Any = eos_token_id
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : List[Any] = jnp.full(scores.shape , -float('inf' ) )
_SCREAMING_SNAKE_CASE : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(lowerCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCAmelCase_ )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = min_length
_SCREAMING_SNAKE_CASE : str = eos_token_id
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
_SCREAMING_SNAKE_CASE : Tuple = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(lowerCAmelCase_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , lowerCAmelCase_ )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = list(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = begin_index
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_SCREAMING_SNAKE_CASE : Any = 1 - jnp.bool_(cur_len - self.begin_index )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.where(lowerCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , lowerCAmelCase_ )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : str = list(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[Any] = dict(lowerCAmelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_SCREAMING_SNAKE_CASE : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
_SCREAMING_SNAKE_CASE : Dict = force_token_array.at[index].set(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : str = jnp.intaa(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
def _force_token(lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : str = scores.shape[0]
_SCREAMING_SNAKE_CASE : List[Any] = self.force_token_array[generation_idx]
_SCREAMING_SNAKE_CASE : Dict = jnp.ones_like(lowerCAmelCase_ , dtype=scores.dtype ) * -float('inf' )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
_SCREAMING_SNAKE_CASE : str = lax.dynamic_update_slice(lowerCAmelCase_ , lowerCAmelCase_ , (0, current_token) )
return new_scores
_SCREAMING_SNAKE_CASE : Optional[int] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowerCAmelCase_ ) , lambda: scores , ) , )
return scores
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = generate_config.eos_token_id
_SCREAMING_SNAKE_CASE : str = generate_config.no_timestamps_token_id
_SCREAMING_SNAKE_CASE : Optional[int] = generate_config.no_timestamps_token_id + 1
_SCREAMING_SNAKE_CASE : str = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCAmelCase_ , 'max_initial_timestamp_index' ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.max_initial_timestamp_index
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_SCREAMING_SNAKE_CASE : Dict = model_config.vocab_size
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
# suppress <|notimestamps|> which is handled by without_timestamps
_SCREAMING_SNAKE_CASE : Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : int = jnp.where((cur_len - self.begin_index) >= 1 , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Any = jnp.where((cur_len - self.begin_index) < 2 , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCAmelCase_ , lowerCAmelCase_ , )
return jnp.where(
lowerCAmelCase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(lowerCAmelCase_ )(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[str] = jnp.where(cur_len == self.begin_index , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Tuple = self.timestamp_begin + self.max_initial_timestamp_index
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(
lowerCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , lowerCAmelCase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
_SCREAMING_SNAKE_CASE : str = jax.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
def handle_cumulative_probs(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : Any = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Dict = jax.vmap(lowerCAmelCase_ )(lowerCAmelCase_ , lowerCAmelCase_ )
return scores
| 621
| 0
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
lowercase_ : List[Any] = logging.get_logger(__name__)
lowercase_ : Optional[Any] = 'Hello, World!'
lowercase_ : Tuple = 'en_XX'
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = Path('data_bin' )
__lowerCamelCase : List[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE__ ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE__ ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE__ ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE__ ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = xmod.model.encoder.sentence_encoder
__lowerCamelCase : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowerCamelCase : List[str] = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = XmodForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCamelCase : Tuple = xmod_sent_encoder.embed_tokens.weight
__lowerCamelCase : Tuple = xmod_sent_encoder.embed_positions.weight
__lowerCamelCase : Dict = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowerCamelCase : Tuple = xmod_sent_encoder.layernorm_embedding.weight
__lowerCamelCase : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCamelCase : List[str] = model.roberta.encoder.layer[i]
__lowerCamelCase : Union[str, Any] = xmod_sent_encoder.layers[i]
# self attention
__lowerCamelCase : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowerCamelCase : Optional[int] = xmod_layer.self_attn.q_proj.weight
__lowerCamelCase : Tuple = xmod_layer.self_attn.q_proj.bias
__lowerCamelCase : Optional[Any] = xmod_layer.self_attn.k_proj.weight
__lowerCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__lowerCamelCase : Tuple = xmod_layer.self_attn.v_proj.weight
__lowerCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowerCamelCase : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowerCamelCase : Tuple = xmod_layer.self_attn.out_proj.weight
__lowerCamelCase : Dict = xmod_layer.self_attn.out_proj.bias
__lowerCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__lowerCamelCase : Optional[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowerCamelCase : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowerCamelCase : Union[str, Any] = xmod_layer.fca.weight
__lowerCamelCase : List[Any] = xmod_layer.fca.bias
# output
__lowerCamelCase : Optional[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowerCamelCase : str = xmod_layer.fca.weight
__lowerCamelCase : Union[str, Any] = xmod_layer.fca.bias
__lowerCamelCase : Tuple = xmod_layer.final_layer_norm.weight
__lowerCamelCase : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowerCamelCase : List[Any] = xmod_layer.adapter_layer_norm.weight
__lowerCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowerCamelCase : List[Any] = bert_output.adapter_modules[lang_code]
__lowerCamelCase : Optional[int] = xmod_layer.adapter_modules[lang_code]
__lowerCamelCase : str = from_adapter.fca.weight
__lowerCamelCase : List[Any] = from_adapter.fca.bias
__lowerCamelCase : int = from_adapter.fca.weight
__lowerCamelCase : Tuple = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowerCamelCase : str = xmod_sent_encoder.layer_norm.weight
__lowerCamelCase : Tuple = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowerCamelCase : Any = xmod.model.classification_heads['mnli'].dense.weight
__lowerCamelCase : Dict = xmod.model.classification_heads['mnli'].dense.bias
__lowerCamelCase : str = xmod.model.classification_heads['mnli'].out_proj.weight
__lowerCamelCase : Dict = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowerCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__lowerCamelCase : int = xmod.model.encoder.lm_head.dense.bias
__lowerCamelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
__lowerCamelCase : Dict = xmod.model.encoder.lm_head.layer_norm.bias
__lowerCamelCase : Dict = xmod.model.encoder.lm_head.weight
__lowerCamelCase : Optional[int] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCamelCase : Optional[Any] = xmod.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE__ )[0]
if classification_head:
__lowerCamelCase : int = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE__ ) )
else:
__lowerCamelCase : Union[str, Any] = xmod.model(SCREAMING_SNAKE_CASE__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowerCamelCase : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
__lowerCamelCase : int = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
lowercase_ : List[str] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 713
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = 42
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any] , a: Optional[Any]=3 , a: Tuple=3 , a: str=("DownEncoderBlock2D",) , a: str=(64,) , a: Optional[int]=2 , a: int=32 , a: str="silu" , a: Optional[Any]=True , ):
super().__init__()
__lowerCamelCase : int = layers_per_block
__lowerCamelCase : List[Any] = torch.nn.Convad(
a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : Dict = nn.ModuleList([] )
# down
__lowerCamelCase : Optional[int] = block_out_channels[0]
for i, down_block_type in enumerate(a ):
__lowerCamelCase : str = output_channel
__lowerCamelCase : Optional[int] = block_out_channels[i]
__lowerCamelCase : Dict = i == len(a ) - 1
__lowerCamelCase : List[Any] = get_down_block(
a , num_layers=self.layers_per_block , in_channels=a , out_channels=a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , )
self.down_blocks.append(a )
# mid
__lowerCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# out
__lowerCamelCase : str = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Optional[Any] = nn.SiLU()
__lowerCamelCase : int = 2 * out_channels if double_z else out_channels
__lowerCamelCase : Tuple = nn.Convad(block_out_channels[-1] , a , 3 , padding=1 )
__lowerCamelCase : List[Any] = False
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase : List[str] = x
__lowerCamelCase : Dict = self.conv_in(a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a: int ):
def custom_forward(*a: Optional[Any] ):
return module(*a )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , use_reentrant=a )
# middle
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , use_reentrant=a )
else:
for down_block in self.down_blocks:
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a )
# middle
__lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase : List[Any] = down_block(a )
# middle
__lowerCamelCase : Union[str, Any] = self.mid_block(a )
# post-process
__lowerCamelCase : Tuple = self.conv_norm_out(a )
__lowerCamelCase : List[str] = self.conv_act(a )
__lowerCamelCase : int = self.conv_out(a )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: int , a: List[str]=3 , a: Tuple=3 , a: str=("UpDecoderBlock2D",) , a: Union[str, Any]=(64,) , a: Optional[Any]=2 , a: Optional[Any]=32 , a: str="silu" , a: Union[str, Any]="group" , ):
super().__init__()
__lowerCamelCase : List[Any] = layers_per_block
__lowerCamelCase : Any = nn.Convad(
a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : int = nn.ModuleList([] )
__lowerCamelCase : Optional[Any] = in_channels if norm_type == 'spatial' else None
# mid
__lowerCamelCase : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# up
__lowerCamelCase : Any = list(reversed(a ) )
__lowerCamelCase : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a ):
__lowerCamelCase : List[Any] = output_channel
__lowerCamelCase : List[str] = reversed_block_out_channels[i]
__lowerCamelCase : Optional[Any] = i == len(a ) - 1
__lowerCamelCase : Optional[Any] = get_up_block(
a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , )
self.up_blocks.append(a )
__lowerCamelCase : List[str] = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase : int = SpatialNorm(block_out_channels[0] , a )
else:
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Union[str, Any] = nn.SiLU()
__lowerCamelCase : List[Any] = nn.Convad(block_out_channels[0] , a , 3 , padding=1 )
__lowerCamelCase : List[str] = False
def _snake_case ( self: Optional[int] , a: Tuple , a: List[str]=None ):
__lowerCamelCase : List[str] = z
__lowerCamelCase : Union[str, Any] = self.conv_in(a )
__lowerCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a: Any ):
def custom_forward(*a: str ):
return module(*a )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a , use_reentrant=a )
__lowerCamelCase : str = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , a , use_reentrant=a )
else:
# middle
__lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a )
__lowerCamelCase : int = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a )
else:
# middle
__lowerCamelCase : int = self.mid_block(a , a )
__lowerCamelCase : List[str] = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = up_block(a , a )
# post-process
if latent_embeds is None:
__lowerCamelCase : Optional[int] = self.conv_norm_out(a )
else:
__lowerCamelCase : Dict = self.conv_norm_out(a , a )
__lowerCamelCase : Any = self.conv_act(a )
__lowerCamelCase : str = self.conv_out(a )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[int] , a: List[Any] , a: List[Any] , a: List[Any] , a: Tuple=None , a: Tuple="random" , a: List[Any]=False , a: List[str]=True ):
super().__init__()
__lowerCamelCase : Optional[Any] = n_e
__lowerCamelCase : Optional[int] = vq_embed_dim
__lowerCamelCase : Tuple = beta
__lowerCamelCase : List[str] = legacy
__lowerCamelCase : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase : str = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase : Dict = self.used.shape[0]
__lowerCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase : Any = self.re_embed
__lowerCamelCase : Optional[int] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCamelCase : int = n_e
__lowerCamelCase : Optional[Any] = sane_index_shape
def _snake_case ( self: Tuple , a: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Any = self.used.to(a )
__lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase : Dict = match.argmax(-1 )
__lowerCamelCase : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCamelCase : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase : str = self.unknown_index
return new.reshape(a )
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : List[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : Optional[int] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Union[str, Any] = self.used.to(a )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase : Optional[Any] = 0 # simply set to zero
__lowerCamelCase : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a )
return back.reshape(a )
def _snake_case ( self: int , a: List[str] ):
# reshape z -> (batch, height, width, channel) and flatten
__lowerCamelCase : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowerCamelCase : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCamelCase : int = torch.argmin(torch.cdist(a , self.embedding.weight ) , dim=1 )
__lowerCamelCase : str = self.embedding(a ).view(z.shape )
__lowerCamelCase : str = None
__lowerCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
__lowerCamelCase : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCamelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCamelCase : int = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCamelCase : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowerCamelCase : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowerCamelCase : Optional[Any] = self.remap_to_used(a )
__lowerCamelCase : Dict = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowerCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _snake_case ( self: Tuple , a: Optional[int] , a: Any ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__lowerCamelCase : Any = indices.reshape(shape[0] , -1 ) # add batch axis
__lowerCamelCase : Any = self.unmap_to_all(a )
__lowerCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCamelCase : str = self.embedding(a )
if shape is not None:
__lowerCamelCase : str = z_q.view(a )
# reshape back to match original input shape
__lowerCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: str , a: Dict , a: Any=False ):
__lowerCamelCase : Tuple = parameters
__lowerCamelCase , __lowerCamelCase : Any = torch.chunk(a , 2 , dim=1 )
__lowerCamelCase : List[str] = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
__lowerCamelCase : int = deterministic
__lowerCamelCase : Dict = torch.exp(0.5 * self.logvar )
__lowerCamelCase : str = torch.exp(self.logvar )
if self.deterministic:
__lowerCamelCase : Optional[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _snake_case ( self: Union[str, Any] , a: Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
__lowerCamelCase : Union[str, Any] = randn_tensor(
self.mean.shape , generator=a , device=self.parameters.device , dtype=self.parameters.dtype )
__lowerCamelCase : str = self.mean + self.std * sample
return x
def _snake_case ( self: List[str] , a: Union[str, Any]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _snake_case ( self: Optional[Any] , a: str , a: Any=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCamelCase : int = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a )
def _snake_case ( self: Optional[int] ):
return self.mean
| 230
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[Any]=13 , _snake_case : Union[str, Any]=7 , _snake_case : Dict=True , _snake_case : Any=True , _snake_case : Dict=True , _snake_case : Optional[Any]=True , _snake_case : Optional[Any]=99 , _snake_case : Any=64 , _snake_case : Dict=32 , _snake_case : Optional[int]=5 , _snake_case : List[str]=4 , _snake_case : str=37 , _snake_case : List[Any]="gelu" , _snake_case : str=0.1 , _snake_case : Tuple=0.1 , _snake_case : Any=512 , _snake_case : List[str]=16 , _snake_case : Optional[Any]=2 , _snake_case : List[Any]=0.02 , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=4 , _snake_case : Optional[Any]=None , ):
__lowercase : Optional[int] = parent
__lowercase : str = batch_size
__lowercase : List[str] = seq_length
__lowercase : str = is_training
__lowercase : List[str] = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : Optional[Any] = use_labels
__lowercase : Any = vocab_size
__lowercase : Any = hidden_size
__lowercase : List[str] = embedding_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : int = num_attention_heads
__lowercase : Optional[Any] = intermediate_size
__lowercase : Optional[int] = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : int = attention_probs_dropout_prob
__lowercase : str = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : int = type_sequence_label_size
__lowercase : Dict = initializer_range
__lowercase : int = num_labels
__lowercase : Optional[Any] = num_choices
__lowercase : int = scope
def snake_case_ ( self : Any ):
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : List[str] = None
if self.use_input_mask:
__lowercase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
if self.use_token_type_ids:
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[int] = None
__lowercase : Any = None
__lowercase : Dict = None
if self.use_labels:
__lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self : List[Any] ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def snake_case_ ( self : Union[str, Any] , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Any ):
__lowercase : Optional[int] = MegatronBertModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Tuple = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
__lowercase : Tuple = model(_snake_case , token_type_ids=_snake_case )
__lowercase : str = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self : str , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Dict ):
__lowercase : str = MegatronBertForMaskedLM(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : List[Any] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : List[str] , _snake_case : str , _snake_case : Dict ):
__lowercase : Optional[Any] = MegatronBertForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Optional[int] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : Tuple , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Optional[Any] ):
__lowercase : Any = MegatronBertForNextSentencePrediction(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Any = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case_ ( self : Optional[Any] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : str , _snake_case : Dict ):
__lowercase : Any = MegatronBertForPreTraining(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Optional[Any] = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , next_sentence_label=_snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case_ ( self : Any , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : int , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ):
__lowercase : Optional[int] = MegatronBertForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : List[Any] = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : str , _snake_case : List[str] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : List[Any] ):
__lowercase : Optional[int] = self.num_labels
__lowercase : Dict = MegatronBertForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : str = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : int , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : List[Any] ):
__lowercase : Any = self.num_labels
__lowercase : Optional[Any] = MegatronBertForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Dict = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : str , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : int , _snake_case : str , _snake_case : List[str] ):
__lowercase : Optional[int] = self.num_choices
__lowercase : Optional[int] = MegatronBertForMultipleChoice(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Any = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self : Optional[int] ):
__lowercase : Tuple = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) : List[str] = config_and_inputs
__lowercase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : str = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Any = True
# test_resize_embeddings = False
A__ : Dict = False
def snake_case_ ( self : str , _snake_case : Dict , _snake_case : Dict , _snake_case : Any=False ):
__lowercase : Optional[Any] = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class in get_values(_snake_case ):
__lowercase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case )
__lowercase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def snake_case_ ( self : Any ):
__lowercase : str = MegatronBertModelTester(self )
__lowercase : int = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def snake_case_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def snake_case_ ( self : Dict ):
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_snake_case )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_snake_case )
def snake_case_ ( self : Dict ):
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_snake_case )
def snake_case_ ( self : Any ):
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_snake_case )
def snake_case_ ( self : Tuple ):
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_snake_case )
def snake_case_ ( self : Dict ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_snake_case )
def snake_case_ ( self : Optional[Any] ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_snake_case )
def snake_case_ ( self : Dict ):
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_snake_case )
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Tuple:
return torch.tensor(
__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase , )
__lowerCAmelCase : int = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
__lowercase : List[Any] = os.path.join(os.environ['''MYDIR'''] , _snake_case )
__lowercase : Tuple = MegatronBertModel.from_pretrained(_snake_case )
model.to(_snake_case )
model.half()
__lowercase : int = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
__lowercase : Union[str, Any] = model(_snake_case )[0]
__lowercase : Tuple = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , _snake_case )
__lowercase : Optional[Any] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
__lowercase : Union[str, Any] = output[0, ii, jj]
__lowercase : int = expected[3 * ii + jj]
__lowercase : Union[str, Any] = '''ii={} jj={} a={} b={}'''.format(_snake_case , _snake_case , _snake_case , _snake_case )
self.assertTrue(math.isclose(_snake_case , _snake_case , rel_tol=_snake_case , abs_tol=_snake_case ) , msg=_snake_case )
| 509
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Tuple ):
# A mock response for an HTTP head request to emulate server down
__lowercase : List[Any] = mock.Mock()
__lowercase : Optional[Any] = 500
__lowercase : List[Any] = {}
__lowercase : str = HTTPError
__lowercase : Optional[int] = {}
# Download this model to make sure it's in the cache.
__lowercase : str = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_snake_case ) as mock_head:
__lowercase : Optional[int] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def snake_case_ ( self : Any ):
# A mock response for an HTTP head request to emulate server down
__lowercase : int = mock.Mock()
__lowercase : List[str] = 500
__lowercase : int = {}
__lowercase : List[Any] = HTTPError
__lowercase : Optional[Any] = {}
# Download this model to make sure it's in the cache.
__lowercase : Dict = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_snake_case ) as mock_head:
__lowercase : Union[str, Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self : int ):
# This test is for deprecated behavior and can be removed in v5
try:
__lowercase : Tuple = tempfile.mktemp()
with open(_snake_case , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , _snake_case )
__lowercase : List[str] = AlbertTokenizer.from_pretrained(_snake_case )
finally:
os.remove(_snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , _snake_case )
__lowercase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def snake_case_ ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
__lowercase : List[str] = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def snake_case_ ( cls : Optional[Any] ):
__lowercase : List[str] = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def snake_case_ ( cls : Optional[int] ):
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def snake_case_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : List[Any] = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Optional[Any] = BertTokenizer(_snake_case )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
__lowercase : List[str] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case , repo_id='''test-tokenizer''' , push_to_hub=_snake_case , use_auth_token=self._token )
__lowercase : List[str] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def snake_case_ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : List[str] = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Tuple = BertTokenizer(_snake_case )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
__lowercase : Any = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_snake_case , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__lowercase : int = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def snake_case_ ( self : Dict ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : Any = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Tuple = CustomTokenizer(_snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : List[str] = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained(_snake_case )
bert_tokenizer.save_pretrained(_snake_case )
__lowercase : List[Any] = CustomTokenizerFast.from_pretrained(_snake_case )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
__lowercase : Tuple = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=_snake_case , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[Any] = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def snake_case_ ( self : int ):
__lowercase : int = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def snake_case_ ( self : Dict ):
__lowercase : List[str] = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def snake_case_ ( self : int ):
__lowercase : Optional[int] = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[str] = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def snake_case_ ( self : Any ):
__lowercase : str = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def snake_case_ ( self : Tuple ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowercase : List[str] = Trie()
__lowercase : Optional[Any] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(_snake_case , ['''AB''', '''C'''] )
| 509
| 1
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 664
|
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list:
if len(_SCREAMING_SNAKE_CASE ) == 0:
return []
_lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = int(max_value - min_value ) + 1
_lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 664
| 1
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase = TypeVar("""T""")
def _snake_case ( __snake_case : int ):
"""simple docstring"""
return (position - 1) // 2
def _snake_case ( __snake_case : int ):
"""simple docstring"""
return (2 * position) + 1
def _snake_case ( __snake_case : int ):
"""simple docstring"""
return (2 * position) + 2
class lowercase__ ( Generic[T] ):
def __init__( self) -> None:
_lowerCamelCase : list[tuple[T, int]] = []
_lowerCamelCase : dict[T, int] = {}
_lowerCamelCase : int = 0
def __len__( self) -> int:
return self.elements
def __repr__( self) -> str:
return str(self.heap)
def UpperCamelCase_ ( self) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight))
_lowerCamelCase : Any = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1)
_lowerCamelCase , _lowerCamelCase : Dict = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_lowerCamelCase , _lowerCamelCase : Tuple = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE)
return elem
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None:
# Update the weight of the given key
_lowerCamelCase : List[Any] = self.position_map[elem]
_lowerCamelCase : Tuple = (elem, weight)
if position > 0:
_lowerCamelCase : List[str] = get_parent_position(SCREAMING_SNAKE_CASE)
_lowerCamelCase , _lowerCamelCase : int = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE)
else:
self._bubble_down(SCREAMING_SNAKE_CASE)
else:
self._bubble_down(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_lowerCamelCase : int = self.position_map[elem]
if curr_pos == 0:
return None
_lowerCamelCase : List[Any] = get_parent_position(SCREAMING_SNAKE_CASE)
_lowerCamelCase , _lowerCamelCase : Dict = self.heap[curr_pos]
_lowerCamelCase , _lowerCamelCase : List[Any] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
return self._bubble_up(SCREAMING_SNAKE_CASE)
return None
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_lowerCamelCase : int = self.position_map[elem]
_lowerCamelCase , _lowerCamelCase : Dict = self.heap[curr_pos]
_lowerCamelCase : str = get_child_left_position(SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = get_child_right_position(SCREAMING_SNAKE_CASE)
if child_left_position < self.elements and child_right_position < self.elements:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.heap[child_left_position]
_lowerCamelCase , _lowerCamelCase : Dict = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
return self._bubble_down(SCREAMING_SNAKE_CASE)
if child_left_position < self.elements:
_lowerCamelCase , _lowerCamelCase : Dict = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
return self._bubble_down(SCREAMING_SNAKE_CASE)
else:
return None
if child_right_position < self.elements:
_lowerCamelCase , _lowerCamelCase : int = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
return self._bubble_down(SCREAMING_SNAKE_CASE)
return None
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None:
# Swap the nodes at the given positions
_lowerCamelCase : List[str] = self.heap[nodea_pos][0]
_lowerCamelCase : Dict = self.heap[nodea_pos][0]
_lowerCamelCase , _lowerCamelCase : List[Any] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_lowerCamelCase : Optional[Any] = nodea_pos
_lowerCamelCase : int = nodea_pos
class lowercase__ ( Generic[T] ):
def __init__( self) -> None:
_lowerCamelCase : dict[T, dict[T, int]] = {}
_lowerCamelCase : int = 0
def __repr__( self) -> str:
return str(self.connections)
def __len__( self) -> int:
return self.nodes
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_lowerCamelCase : int = {}
self.nodes += 1
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE)
self.add_node(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = weight
_lowerCamelCase : int = weight
def _snake_case ( __snake_case : GraphUndirectedWeighted[T] , ):
"""simple docstring"""
_lowerCamelCase : dict[T, int] = {node: maxsize for node in graph.connections}
_lowerCamelCase : dict[T, T | None] = {node: None for node in graph.connections}
_lowerCamelCase : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__snake_case , __snake_case )
if priority_queue.is_empty():
return dist, parent
# initialization
_lowerCamelCase : List[str] = priority_queue.extract_min()
_lowerCamelCase : str = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowerCamelCase : List[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__snake_case , dist[neighbour] )
_lowerCamelCase : Tuple = node
# running prim's algorithm
while not priority_queue.is_empty():
_lowerCamelCase : List[Any] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowerCamelCase : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__snake_case , dist[neighbour] )
_lowerCamelCase : List[str] = node
return dist, parent
| 88
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_lowerCamelCase = None
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
_lowerCamelCase = {
'google/rembert': 2_56,
}
_lowerCamelCase = '▁'
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : Dict = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : List[Any] = RemBertTokenizer
def __init__( self : List[str] , __snake_case : int=None , __snake_case : str=None , __snake_case : Optional[int]=True , __snake_case : Dict=True , __snake_case : Optional[int]=False , __snake_case : Tuple="[CLS]" , __snake_case : Any="[SEP]" , __snake_case : Dict="<unk>" , __snake_case : List[str]="[SEP]" , __snake_case : Dict="<pad>" , __snake_case : str="[CLS]" , __snake_case : Union[str, Any]="[MASK]" , **__snake_case : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
def lowerCamelCase_ ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1]
def lowerCamelCase_ ( self : int , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : List[str] , __snake_case : str , __snake_case : Optional[str] = None ):
if not os.path.isdir(__snake_case ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__snake_case ) )
return
UpperCAmelCase_ = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 144
| 0
|
from typing import List
import numpy as np
def a ( snake_case__: dict ):
'''simple docstring'''
lowercase_ = {key: len(snake_case__ ) for key, value in gen_kwargs.items() if isinstance(snake_case__ , snake_case__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
lowercase_ = max(lists_lengths.values() , default=0 )
return max(1 , snake_case__ )
def a ( snake_case__: int , snake_case__: int ):
'''simple docstring'''
lowercase_ = []
for group_idx in range(snake_case__ ):
lowercase_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowercase_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowercase_ = range(snake_case__ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case__ )
return shards_indices_per_group
def a ( snake_case__: dict , snake_case__: int ):
'''simple docstring'''
lowercase_ = _number_of_shards_in_gen_kwargs(snake_case__ )
if num_shards == 1:
return [dict(snake_case__ )]
else:
lowercase_ = _distribute_shards(num_shards=snake_case__ , max_num_jobs=snake_case__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case__ , snake_case__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case__ ) )
]
def a ( snake_case__: List[dict] ):
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def a ( snake_case__: np.random.Generator , snake_case__: dict ):
'''simple docstring'''
lowercase_ = {len(snake_case__ ) for value in gen_kwargs.values() if isinstance(snake_case__ , snake_case__ )}
lowercase_ = {}
for size in list_sizes:
lowercase_ = list(range(snake_case__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowercase_ = dict(snake_case__ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case__ , snake_case__ ):
lowercase_ = [value[i] for i in indices_per_size[len(snake_case__ )]]
return shuffled_kwargs
| 409
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__a = get_logger(__name__)
def a ( snake_case__: Optional[Any] , snake_case__: Any , snake_case__: int , snake_case__: int , snake_case__: Optional[Any]=0 ):
'''simple docstring'''
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase_ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowercase_ = os.path.join(snake_case__ , snake_case__ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase_ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowercase_ = os.path.join(snake_case__ , snake_case__ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase_ = os.path.join(snake_case__ , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F'''Saving model to {ckpt_dir}''' )
lowercase_ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def a ( snake_case__: Any , snake_case__: Optional[Any] , snake_case__: Optional[Any] , snake_case__: Union[str, Any] , snake_case__: Tuple=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
lowercase_ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowercase_ = os.path.join(snake_case__ , snake_case__ )
logger.info(F'''Loading model from {input_model_file}''' )
lowercase_ = torch.load(snake_case__ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase_ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowercase_ = os.path.join(snake_case__ , snake_case__ )
logger.info(F'''Loading model from {input_model_file}''' )
lowercase_ = torch.load(snake_case__ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase_ = (
os.path.join(snake_case__ , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
lowercase_ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , )
lowercase_ = state_dict['''model''']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(snake_case__ )
def a ( snake_case__: Dict , snake_case__: str , snake_case__: Optional[Any] , snake_case__: Any , snake_case__: str , snake_case__: str=0 ):
'''simple docstring'''
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase_ = FSDP.optim_state_dict(snake_case__ , snake_case__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase_ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowercase_ = os.path.join(snake_case__ , snake_case__ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
lowercase_ = os.path.join(snake_case__ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Dict , snake_case__: Union[str, Any] , snake_case__: str , snake_case__: List[str]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase_ = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase_ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowercase_ = os.path.join(snake_case__ , snake_case__ )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
lowercase_ = torch.load(snake_case__ )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
lowercase_ = (
os.path.join(snake_case__ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
lowercase_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , )
lowercase_ = optim_state['''optimizer''']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
lowercase_ = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ )
optimizer.load_state_dict(snake_case__ )
| 409
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
_lowercase : Optional[Any] = sd_pipe.to(lowerCamelCase)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
sd_pipe.set_scheduler('sample_euler')
_lowercase : str = 'A painting of a squirrel eating a burger'
_lowercase : int = torch.manual_seed(0)
_lowercase : int = sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=9.0, num_inference_steps=20, output_type='np')
_lowercase : Union[str, Any] = output.images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Dict = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : int = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
_lowercase : Optional[int] = sd_pipe.to(lowerCamelCase)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
sd_pipe.set_scheduler('sample_euler')
_lowercase : Any = 'A painting of a squirrel eating a burger'
_lowercase : Any = torch.manual_seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=9.0, num_inference_steps=20, output_type='np')
_lowercase : List[str] = output.images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Tuple = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
_lowercase : List[str] = sd_pipe.to(lowerCamelCase)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
sd_pipe.set_scheduler('sample_dpmpp_2m')
_lowercase : Optional[Any] = 'A painting of a squirrel eating a burger'
_lowercase : Optional[Any] = torch.manual_seed(0)
_lowercase : Union[str, Any] = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=lowerCamelCase, )
_lowercase : List[Any] = output.images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Dict = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 89
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
def __init__( self ):
snake_case__ : List[str] =""""""
snake_case__ : List[Any] =""""""
snake_case__ : Optional[int] =[]
snake_case__ : Tuple =0
snake_case__ : Optional[Any] =2_5_6
snake_case__ : Optional[Any] =0
snake_case__ : str =0
snake_case__ : Any =0
snake_case__ : Dict =0
def lowercase__ ( self , a ):
snake_case__ : List[str] =cva.imread(a , 0 )
snake_case__ : Optional[Any] =copy.deepcopy(self.img )
snake_case__ , snake_case__ , snake_case__ : Any =plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
snake_case__ : Tuple =np.sum(a )
for i in range(len(a ) ):
snake_case__ : Union[str, Any] =x[i] / self.k
self.sk += prk
snake_case__ : Tuple =(self.L - 1) * self.sk
if self.rem != 0:
snake_case__ : int =int(last % last )
snake_case__ : List[Any] =int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(a )
snake_case__ : List[Any] =int(np.ma.count(self.img ) / self.img[1].size )
snake_case__ : Union[str, Any] =self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case__ : Optional[int] =self.img[j][i]
if num != self.last_list[num]:
snake_case__ : Any =self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def lowercase__ ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def lowercase__ ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
__lowerCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 385
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : Any ={
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict =["""ChineseCLIPFeatureExtractor"""]
UpperCAmelCase : List[str] =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] =[
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 504
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _lowerCAmelCase (_lowerCAmelCase):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name)
UpperCAmelCase : Dict ="""
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _lowercase (a_ ):
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=snake_case__ , required=snake_case__ , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=snake_case__ , required=snake_case__ , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=snake_case__ , required=snake_case__ , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=snake_case__ , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=snake_case__ , default=snake_case__ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=snake_case__ )
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = logging.get_logger("transformers-cli/converting" )
self._logger.info(F"""Loading model {model_type}""" )
UpperCamelCase_ = model_type
UpperCamelCase_ = tf_checkpoint
UpperCamelCase_ = pytorch_dump_output
UpperCamelCase_ = config
UpperCamelCase_ = finetuning_task_name
def _lowerCamelCase ( self ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase_ = self._tf_checkpoint
UpperCamelCase_ = ""
else:
UpperCamelCase_ = self._tf_checkpoint
UpperCamelCase_ = ""
convert_transfo_xl_checkpoint_to_pytorch(
snake_case__ , self._config , self._pytorch_dump_output , snake_case__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 504
| 1
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
UpperCAmelCase_ : List[Any] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase_ : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCAmelCase_ : List[Any] = test_metrics
@require_cpu
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
self.test_metrics.main()
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase_ : List[str] = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
| 95
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
A = logging.get_logger(__name__)
A = 'T5Config'
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """mt5"""
__A = MTaConfig
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """mt5"""
__A = MTaConfig
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """mt5"""
__A = MTaConfig
| 187
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE__ : str = """pt"""
elif is_tf_available():
SCREAMING_SNAKE_CASE__ : Optional[Any] = """tf"""
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = """jax"""
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = PerceiverTokenizer
a__ = False
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Optional[int] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def A ( self , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=2_0 , __lowerCAmelCase=5 ):
"""simple docstring"""
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__ :Optional[int] = []
for i in range(len(__lowerCAmelCase ) ):
try:
__magic_name__ :Union[str, Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__ :Tuple = list(filter(lambda __lowerCAmelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowerCAmelCase ) )
__magic_name__ :Optional[Any] = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) )
if max_length is not None and len(__lowerCAmelCase ) > max_length:
__magic_name__ :List[Any] = toks[:max_length]
if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0:
while len(__lowerCAmelCase ) < min_length:
__magic_name__ :Dict = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__ :int = [t[0] for t in toks]
# Ensure consistency
__magic_name__ :Dict = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
if " " not in output_txt and len(__lowerCAmelCase ) > 1:
__magic_name__ :Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase )
)
if with_prefix_space:
__magic_name__ :Optional[Any] = ''' ''' + output_txt
__magic_name__ :Tuple = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
return output_txt, output_ids
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.perceiver_tokenizer
__magic_name__ :Tuple = '''Unicode €.'''
__magic_name__ :List[Any] = tokenizer(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , __lowerCAmelCase )
# decoding
__magic_name__ :Union[str, Any] = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , '''[CLS]Unicode €.[SEP]''' )
__magic_name__ :List[Any] = tokenizer('''e è é ê ë''' )
__magic_name__ :List[str] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , __lowerCAmelCase )
# decoding
__magic_name__ :List[Any] = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.perceiver_tokenizer
__magic_name__ :List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__magic_name__ :List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
__magic_name__ :Optional[int] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
if FRAMEWORK != "jax":
__magic_name__ :List[str] = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ :Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.perceiver_tokenizer
__magic_name__ :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__magic_name__ :Dict = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowerCAmelCase )
self.assertIn('''attention_mask''' , __lowerCAmelCase )
self.assertNotIn('''decoder_input_ids''' , __lowerCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.perceiver_tokenizer
__magic_name__ :Optional[int] = [
'''Summary of the text.''',
'''Another summary.''',
]
__magic_name__ :int = tokenizer(
text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def A ( self ):
"""simple docstring"""
# safety check on max_len default value so we are sure the test works
__magic_name__ :List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__magic_name__ :Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ :Optional[int] = tempfile.mkdtemp()
__magic_name__ :List[str] = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
__magic_name__ :int = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
__magic_name__ :int = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
__magic_name__ :Tuple = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ :Any = tempfile.mkdtemp()
__magic_name__ :Dict = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__magic_name__ :Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__magic_name__ :Tuple = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
__magic_name__ :Any = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__magic_name__ :Optional[int] = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ :List[Any] = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ :int = json.load(__lowerCAmelCase )
__magic_name__ :Optional[int] = [F'''<extra_id_{i}>''' for i in range(1_2_5 )]
__magic_name__ :List[str] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__magic_name__ :Union[str, Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ :Optional[Any] = tokenizer_class.from_pretrained(
__lowerCAmelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ :Any = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowerCAmelCase )]
__magic_name__ :str = tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__magic_name__ :Tuple = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ :List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
__magic_name__ :List[str] = tokenizer.convert_tokens_to_string(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
| 180
|
from __future__ import annotations
from math import pi, sqrt
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180
| 1
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A :
'''simple docstring'''
def __init__(self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int=14 , _UpperCAmelCase : Union[str, Any]=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=False , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : int=4 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Any=0.02 , ) -> Any:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = rotary_dim
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = None
lowercase__ = vocab_size - 1
lowercase__ = vocab_size - 1
lowercase__ = vocab_size - 1
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ = 20
lowercase__ = model_class_name(lowerCamelCase_ )
lowercase__ = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
lowercase__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowercase__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase__ = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
lowercase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowercase__ = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
lowercase__ = model(lowerCamelCase_ )
lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = 20
lowercase__ = model_class_name(lowerCamelCase_ )
lowercase__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowercase__ = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
lowercase__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase__ = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
lowercase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowercase__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
lowercase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class A ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
A__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = FlaxGPTJModelTester(self )
def lowerCamelCase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase__ (self : int ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
lowercase__ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
lowercase__ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowercase__ = False
lowercase__ = model.config.eos_token_id
lowercase__ = jax.jit(model.generate )
lowercase__ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
lowercase__ = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
lowercase__ = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ = getattr(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = pt_inputs['input_ids'].shape
lowercase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
lowercase__ = 0
lowercase__ = 1
lowercase__ = 0
lowercase__ = 1
lowercase__ = pt_model_class(lowerCamelCase_ ).eval()
lowercase__ = model_class(lowerCamelCase_ , dtype=jnp.floataa )
lowercase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
lowercase__ = fx_state
with torch.no_grad():
lowercase__ = pt_model(**lowerCamelCase_ ).to_tuple()
lowercase__ = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
lowercase__ = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
lowercase__ = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ = getattr(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = pt_model_class(lowerCamelCase_ ).eval()
lowercase__ = model_class(lowerCamelCase_ , dtype=jnp.floataa )
lowercase__ = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
lowercase__ = pt_inputs['input_ids'].shape
lowercase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
lowercase__ = 0
lowercase__ = 1
lowercase__ = 0
lowercase__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase__ = pt_model(**lowerCamelCase_ ).to_tuple()
lowercase__ = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
lowercase__ = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
lowercase__ = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 15
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase = 16
lowerCAmelCase = 32
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : DatasetDict , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int = 1_6 ) ->List[str]:
lowerCamelCase__ : Union[str, Any] =AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Dict =DatasetDict(
{
'train': dataset['train'].select(snake_case_ ),
'validation': dataset['train'].select(snake_case_ ),
'test': dataset['validation'],
} )
def tokenize_function(snake_case_ : str ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ : Tuple =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase__ : List[str] =datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase__ : Dict =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase__ : int =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase__ : str =1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase__ : Union[str, Any] =8
else:
lowerCamelCase__ : int =None
return tokenizer.pad(
snake_case_ , padding='longest' , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCamelCase__ : Optional[int] =DataLoader(
tokenized_datasets['train'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
lowerCamelCase__ : int =DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
lowerCamelCase__ : List[Any] =DataLoader(
tokenized_datasets['test'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) ->Union[str, Any]:
# New Code #
lowerCamelCase__ : Optional[int] =[]
# Download the dataset
lowerCamelCase__ : Optional[int] =load_dataset('glue' , 'mrpc' )
# Create our splits
lowerCamelCase__ : List[Any] =StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowerCamelCase__ : List[str] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ : Any =config['lr']
lowerCamelCase__ : List[str] =int(config['num_epochs'] )
lowerCamelCase__ : List[Any] =int(config['seed'] )
lowerCamelCase__ : Tuple =int(config['batch_size'] )
lowerCamelCase__ : List[str] =evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCamelCase__ : int =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase__ : Union[str, Any] =batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase__ : Any =MAX_GPU_BATCH_SIZE
set_seed(snake_case_ )
# New Code #
# Create our folds:
lowerCamelCase__ : Tuple =kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
lowerCamelCase__ : List[str] =[]
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(snake_case_ ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =get_fold_dataloaders(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Optional[int] =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : int =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : Tuple =AdamW(params=model.parameters() , lr=snake_case_ )
# Instantiate scheduler
lowerCamelCase__ : str =get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=1_0_0 , num_training_steps=(len(snake_case_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Now we train the model
for epoch in range(snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase__ : Dict =model(**snake_case_ )
lowerCamelCase__ : str =outputs.loss
lowerCamelCase__ : Optional[int] =loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : List[Any] =model(**snake_case_ )
lowerCamelCase__ : Dict =outputs.logits.argmax(dim=-1 )
lowerCamelCase__ , lowerCamelCase__ : Tuple =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
lowerCamelCase__ : Dict =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , snake_case_ )
# New Code #
# We also run predictions on the test set at the very end
lowerCamelCase__ : Any =[]
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**snake_case_ )
lowerCamelCase__ : List[Any] =outputs.logits
lowerCamelCase__ , lowerCamelCase__ : str =accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(snake_case_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCamelCase__ : Dict =torch.cat(snake_case_ , dim=0 )
lowerCamelCase__ : str =torch.stack(snake_case_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowerCamelCase__ : int =metric.compute(predictions=snake_case_ , references=snake_case_ )
accelerator.print('Average test metrics from all folds:' , snake_case_ )
def lowerCAmelCase_ ( ) ->str:
lowerCamelCase__ : Tuple =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=snake_case_ , default=snake_case_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=snake_case_ , default=3 , help='The number of splits to perform across the dataset' )
lowerCamelCase__ : Tuple =parser.parse_args()
lowerCamelCase__ : Optional[Any] ={'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
| 174
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 594
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class _a ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_=125 , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowercase =[F'''<extra_id_{i}>''' for i in range(lowerCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowercase =len(set(filter(lambda lowerCAmelCase_ : bool("extra_id" in str(lowerCAmelCase_ ) ) , lowerCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
_lowercase =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
_lowercase =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_lowercase =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
super().__init__(
eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , extra_ids=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
_lowercase =extra_ids
_lowercase =2**8 # utf is 8 bits
# define special tokens dict
_lowercase ={
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
_lowercase =len(self.special_tokens_encoder )
_lowercase =len(lowerCAmelCase_ )
for i, token in enumerate(lowerCAmelCase_ ):
_lowercase =self.vocab_size + i - n
_lowercase ={v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCAmelCase_ )) + [1]
return ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
if len(lowerCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
_lowercase =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
_lowercase =self._add_eos_if_not_present(lowerCAmelCase_ )
if token_ids_a is None:
return token_ids_a
else:
_lowercase =self._add_eos_if_not_present(lowerCAmelCase_ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
_lowercase =[chr(lowerCAmelCase_ ) for i in text.encode("utf-8" )]
return tokens
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
if token in self.special_tokens_encoder:
_lowercase =self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
_lowercase =self.added_tokens_encoder[token]
elif len(lowerCAmelCase_ ) != 1:
_lowercase =self.unk_token_id
else:
_lowercase =ord(lowerCAmelCase_ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
if index in self.special_tokens_decoder:
_lowercase =self.special_tokens_decoder[index]
else:
_lowercase =chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
_lowercase =b""
for token in tokens:
if token in self.special_tokens_decoder:
_lowercase =self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
_lowercase =self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
_lowercase =token.encode("utf-8" )
elif token in self.added_tokens_encoder:
_lowercase =token.encode("utf-8" )
else:
_lowercase =bytes([ord(lowerCAmelCase_ )] )
bstring += tok_string
_lowercase =bstring.decode("utf-8" , errors="ignore" )
return string
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
return ()
| 594
| 1
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__A : Tuple = parser.parse_args()
__A : Any = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__A : List[str] = CLIPImageProcessor()
__A : List[Any] = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__A : Any = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 231
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Tuple = "big_bird"
def __init__( self : Tuple , A : Tuple=5_03_58 , A : Any=7_68 , A : Union[str, Any]=12 , A : List[str]=12 , A : Dict=30_72 , A : int="gelu_new" , A : Optional[int]=0.1 , A : Optional[int]=0.1 , A : Dict=40_96 , A : Tuple=2 , A : Union[str, Any]=0.02 , A : str=1e-12 , A : Optional[Any]=True , A : Union[str, Any]=0 , A : Optional[int]=1 , A : Optional[int]=2 , A : Any=66 , A : List[Any]="block_sparse" , A : List[Any]=True , A : Union[str, Any]=False , A : Optional[Any]=64 , A : Optional[Any]=3 , A : Tuple=None , **A : Union[str, Any] , ) -> Tuple:
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , sep_token_id=A , **A , )
lowercase_ : Tuple = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : int = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : str = intermediate_size
lowercase_ : Optional[Any] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[str] = type_vocab_size
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : int = use_cache
lowercase_ : int = rescale_embeddings
lowercase_ : Optional[Any] = attention_type
lowercase_ : str = use_bias
lowercase_ : Dict = block_size
lowercase_ : str = num_random_blocks
lowercase_ : Optional[Any] = classifier_dropout
class _UpperCAmelCase ( _A ):
@property
def A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase_ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase_ : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 231
| 1
|
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_a : int= logging.getLogger(__name__)
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
UpperCAmelCase : str = """token-classification"""
def __init__(self : Tuple , _A : int) -> Union[str, Any]:
if type(_a) == dict:
__snake_case : str = Namespace(**_a)
__snake_case : Dict = import_module('tasks')
try:
__snake_case : Optional[Any] = getattr(_a , hparams.task_type)
__snake_case : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
__snake_case : Tuple = self.token_classification_task.get_labels(hparams.labels)
__snake_case : str = CrossEntropyLoss().ignore_index
super().__init__(_a , len(self.labels) , self.mode)
def _lowercase (self : int , **_A : Any) -> int:
return self.model(**_a)
def _lowercase (self : Optional[Any] , _A : Union[str, Any] , _A : int) -> Optional[Any]:
__snake_case : str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
__snake_case : str = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case : str = self(**_a)
__snake_case : Dict = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowercase (self : Dict) -> Optional[Any]:
__snake_case : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
__snake_case : Union[str, Any] = self._feature_file(_a)
if os.path.exists(_a) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , _a)
__snake_case : Any = torch.load(_a)
else:
logger.info('Creating features from dataset file at %s' , args.data_dir)
__snake_case : List[str] = self.token_classification_task.read_examples_from_file(args.data_dir , _a)
__snake_case : Optional[Any] = self.token_classification_task.convert_examples_to_features(
_a , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet']) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_a , pad_on_left=bool(self.config.model_type in ['xlnet']) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('Saving features into cached file %s' , _a)
torch.save(_a , _a)
def _lowercase (self : Dict , _A : int , _A : int , _A : bool = False) -> str:
__snake_case : int = self._feature_file(_a)
logger.info('Loading features from cached file %s' , _a)
__snake_case : str = torch.load(_a)
__snake_case : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
__snake_case : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
__snake_case : int = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
__snake_case : Any = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK(we will not use this anymore soon)
__snake_case : List[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(_a , _a , _a , _a) , batch_size=_a)
def _lowercase (self : Optional[int] , _A : List[str] , _A : Tuple) -> Optional[Any]:
"""Compute validation""" ""
__snake_case : List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
__snake_case : List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case : Union[str, Any] = self(**_a)
__snake_case : int = outputs[:2]
__snake_case : List[Any] = logits.detach().cpu().numpy()
__snake_case : int = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase (self : Optional[Any] , _A : List[str]) -> Dict:
__snake_case : Tuple = torch.stack([x['val_loss'] for x in outputs]).mean()
__snake_case : Optional[Any] = np.concatenate([x['pred'] for x in outputs] , axis=0)
__snake_case : List[str] = np.argmax(_a , axis=2)
__snake_case : Dict = np.concatenate([x['target'] for x in outputs] , axis=0)
__snake_case : Tuple = dict(enumerate(self.labels))
__snake_case : Optional[int] = [[] for _ in range(out_label_ids.shape[0])]
__snake_case : List[Any] = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
__snake_case : Union[str, Any] = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(_a , _a),
"""precision""": precision_score(_a , _a),
"""recall""": recall_score(_a , _a),
"""f1""": fa_score(_a , _a),
}
__snake_case : Any = dict(results.items())
__snake_case : Any = results
return ret, preds_list, out_label_list
def _lowercase (self : Optional[Any] , _A : Any) -> Union[str, Any]:
__snake_case : Tuple = self._eval_end(_a)
__snake_case : int = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase (self : Tuple , _A : List[Any]) -> List[str]:
__snake_case : Tuple = self._eval_end(_a)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__snake_case : str = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase (_A : Tuple , _A : str) -> Optional[Any]:
BaseTransformer.add_model_specific_args(_a , _a)
parser.add_argument(
'--task_type' , default='NER' , type=_a , help='Task type to fine tune in training (e.g. NER, POS, etc)')
parser.add_argument(
'--max_seq_length' , default=1_28 , type=_a , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=_a , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
'--gpus' , default=0 , type=_a , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets')
return parser
if __name__ == "__main__":
_a : str= argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_a : List[Any]= NERTransformer.add_model_specific_args(parser, os.getcwd())
_a : Dict= parser.parse_args()
_a : int= NERTransformer(args)
_a : List[str]= generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_a : Any= sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
_a : Any= model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 705
|
"""simple docstring"""
from __future__ import annotations
_a : str= "#"
class UpperCamelCase :
def __init__(self : str) -> None:
__snake_case : dict = {}
def _lowercase (self : Union[str, Any] , _A : str) -> None:
__snake_case : Optional[Any] = self._trie
for char in text:
if char not in trie:
__snake_case : Optional[int] = {}
__snake_case : Optional[int] = trie[char]
__snake_case : Optional[int] = True
def _lowercase (self : List[str] , _A : str) -> tuple | list:
__snake_case : str = self._trie
for char in prefix:
if char in trie:
__snake_case : Optional[int] = trie[char]
else:
return []
return self._elements(_A)
def _lowercase (self : List[Any] , _A : dict) -> tuple:
__snake_case : str = []
for c, v in d.items():
__snake_case : Optional[Any] = [' '] if c == END else [(c + s) for s in self._elements(_A)]
result.extend(_A)
return tuple(_A)
_a : str= Trie()
_a : Any= ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> tuple:
'''simple docstring'''
__snake_case : Optional[int] = trie.find_word(UpperCAmelCase_ )
return tuple(string + word for word in suffixes )
def __UpperCAmelCase ( ) -> None:
'''simple docstring'''
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 192
| 0
|
from __future__ import annotations
def A ( lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Union[str, Any] = [True] * limit
UpperCamelCase__ :int = False
UpperCamelCase__ :Optional[Any] = False
UpperCamelCase__ :str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
UpperCamelCase__ :List[Any] = i * 2
while index < limit:
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Tuple = index + i
UpperCamelCase__ :str = [2]
for i in range(3 , lowercase__ , 2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def A ( lowercase__ : int = 100_0000 ) -> int:
UpperCamelCase__ :Any = prime_sieve(lowercase__ )
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :Optional[Any] = 0
for i in range(len(lowercase__ ) ):
for j in range(i + length , len(lowercase__ ) ):
UpperCamelCase__ :Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
UpperCamelCase__ :Union[str, Any] = j - i
UpperCamelCase__ :Any = sol
return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = KandinskyInpaintPipeline
_lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowercase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,)
__UpperCamelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0,2,3,1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
__UpperCamelCase = np.ones((64, 64),dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ),return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = np.ones((768, 768),dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = 'a hat'
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase, __UpperCamelCase = pipe_prior(
A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple()
__UpperCamelCase = pipeline(
A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_,A_ )
| 1
| 0
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowercase__:
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int=sys.maxsize ) -> Union[str, Any]:
lowercase_ = '''bilinear'''
lowercase_ = max_size
lowercase_ = short_edge_length
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict:
lowercase_ = []
for img in imgs:
lowercase_ , lowercase_ = img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase_ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase_ = size * 1.0 / min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if h < w:
lowercase_ , lowercase_ = size, scale * w
else:
lowercase_ , lowercase_ = scale * h, size
if max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) > self.max_size:
lowercase_ = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = newh * scale
lowercase_ = neww * scale
lowercase_ = int(neww + 0.5 )
lowercase_ = int(newh + 0.5 )
if img.dtype == np.uinta:
lowercase_ = Image.fromarray(SCREAMING_SNAKE_CASE_ )
lowercase_ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase_ = np.asarray(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowercase_ = nn.functional.interpolate(
SCREAMING_SNAKE_CASE_ , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE_ ).squeeze(0 )
img_augs.append(SCREAMING_SNAKE_CASE_ )
return img_augs
class lowercase__:
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> str:
lowercase_ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase_ = cfg.INPUT.FORMAT
lowercase_ = cfg.SIZE_DIVISIBILITY
lowercase_ = cfg.PAD_VALUE
lowercase_ = cfg.INPUT.MAX_SIZE_TEST
lowercase_ = cfg.MODEL.DEVICE
lowercase_ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase_ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase_ = lambda SCREAMING_SNAKE_CASE_ : (x - self.pixel_mean) / self.pixel_std
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
lowercase_ = tuple(max(SCREAMING_SNAKE_CASE_ ) for s in zip(*[img.shape for img in images] ) )
lowercase_ = [im.shape[-2:] for im in images]
lowercase_ = [
nn.functional.pad(
SCREAMING_SNAKE_CASE_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
return torch.stack(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ )
def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=False ) -> Any:
with torch.no_grad():
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = [images]
if single_image:
assert len(SCREAMING_SNAKE_CASE_ ) == 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(SCREAMING_SNAKE_CASE_ , images.pop(SCREAMING_SNAKE_CASE_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
SCREAMING_SNAKE_CASE_ , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase_ = torch.tensor([im.shape[:2] for im in images] )
lowercase_ = self.aug(SCREAMING_SNAKE_CASE_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase_ = [self.normalizer(SCREAMING_SNAKE_CASE_ ) for x in images]
# now pad them to do the following operations
lowercase_ , lowercase_ = self.pad(SCREAMING_SNAKE_CASE_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase_ = torch.true_divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def a ( snake_case__: Tuple , snake_case__: Dict ):
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def a ( snake_case__: List[str] , snake_case__: Tuple[int, int] ):
'''simple docstring'''
assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!"
lowercase_ , lowercase_ = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__ )
tensor[:, 1].clamp_(min=0 , max=snake_case__ )
tensor[:, 2].clamp_(min=0 , max=snake_case__ )
tensor[:, 3].clamp_(min=0 , max=snake_case__ )
| 409
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Dict = 'falcon'
a :Optional[Any] = ['past_key_values']
def __init__( self : str , SCREAMING_SNAKE_CASE_ : List[str]=6_5_0_2_4 , SCREAMING_SNAKE_CASE_ : Tuple=4_5_4_4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Tuple=1_1 , SCREAMING_SNAKE_CASE_ : int=1_1 , **SCREAMING_SNAKE_CASE_ : Any , ) -> Optional[Any]:
lowercase_ = vocab_size
# Backward compatibility with n_embed kwarg
lowercase_ = kwargs.pop('''n_embed''' , SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_size if n_embed is None else n_embed
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = layer_norm_epsilon
lowercase_ = initializer_range
lowercase_ = use_cache
lowercase_ = hidden_dropout
lowercase_ = attention_dropout
lowercase_ = bos_token_id
lowercase_ = eos_token_id
lowercase_ = num_attention_heads if num_kv_heads is None else num_kv_heads
lowercase_ = alibi
lowercase_ = new_decoder_architecture
lowercase_ = multi_query # Ignored when new_decoder_architecture is True
lowercase_ = parallel_attn
lowercase_ = bias
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def _lowercase ( self : Tuple ) -> Optional[int]:
return self.hidden_size // self.num_attention_heads
@property
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
return not self.alibi
| 409
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """dandelin/vilt-b32-finetuned-vqa"""
__lowercase = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
__lowercase = """image_qa"""
__lowercase = AutoProcessor
__lowercase = AutoModelForVisualQuestionAnswering
__lowercase = ["""image""", """text"""]
__lowercase = ["""text"""]
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return self.pre_processor(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='pt' )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
with torch.no_grad():
return self.model(**lowerCAmelCase_ ).logits
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 495
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase : Optional[Any] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 495
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _lowerCamelCase (lowerCamelCase ):
lowercase__ = 42
lowercase__ = 42
class _lowerCamelCase (lowerCamelCase , lowerCamelCase ):
lowercase__ = 1
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE_ = 2_000 , SCREAMING_SNAKE_CASE_ = 0.1_5 , SCREAMING_SNAKE_CASE_ = 0.0_1 , SCREAMING_SNAKE_CASE_ = 1_3_4_8.0 , SCREAMING_SNAKE_CASE_ = 1E-5 , SCREAMING_SNAKE_CASE_ = 1 , ):
# standard deviation of the initial noise distribution
__snake_case = sigma_max
# setable values
__snake_case = None
self.set_sigmas(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
return sample
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ):
__snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__snake_case = torch.linspace(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ):
__snake_case = sigma_min if sigma_min is not None else self.config.sigma_min
__snake_case = sigma_max if sigma_max is not None else self.config.sigma_max
__snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__snake_case = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE_ ) , math.log(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
__snake_case = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__snake_case = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__snake_case = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__snake_case = timesteps.to(self.discrete_sigmas.device )
__snake_case = self.discrete_sigmas[timesteps].to(sample.device )
__snake_case = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(sample.device )
__snake_case = torch.zeros_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__snake_case = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__snake_case = diffusion.unsqueeze(-1 )
__snake_case = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__snake_case = randn_tensor(
sample.shape , layout=sample.layout , generator=SCREAMING_SNAKE_CASE_ , device=sample.device , dtype=sample.dtype )
__snake_case = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__snake_case = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE_ , prev_sample_mean=SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__snake_case = randn_tensor(sample.shape , layout=sample.layout , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__snake_case = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__snake_case = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__snake_case = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__snake_case = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__snake_case = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__snake_case = step_size.unsqueeze(-1 )
__snake_case = sample + step_size * model_output
__snake_case = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case = timesteps.to(original_samples.device )
__snake_case = self.discrete_sigmas.to(original_samples.device )[timesteps]
__snake_case = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(SCREAMING_SNAKE_CASE_ ) * sigmas[:, None, None, None]
)
__snake_case = noise + original_samples
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 345
|
from __future__ import annotations
from typing import Any
def __lowercase( __snake_case : list ) -> int:
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(__snake_case ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345
| 1
|
"""simple docstring"""
import functools
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# Validation
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(lowerCAmelCase__ ) != 3 or not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(lowerCAmelCase__ ) == 0:
return 0
if min(lowerCAmelCase__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(lowerCAmelCase__ ) >= 366:
raise ValueError("All days elements should be less than 366" )
UpperCAmelCase_ = set(lowerCAmelCase__ )
@functools.cache
def dynamic_programming(lowerCAmelCase__ ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=36 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = 300
return config
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = MraModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = MraModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = ()
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82
| 1
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = FlaxAutoencoderKL
@property
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.uniform(__lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
| 139
|
lowercase__ : Optional[int] = range(2, 20 + 1)
lowercase__ : List[str] = [10**k for k in range(ks[-1] + 1)]
lowercase__ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = sum(a_i[j] for j in range(_A , len(_A ) ) )
snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(_A ) , _A ) ) )
snake_case_ , snake_case_ = 0, 0
snake_case_ = n - i
snake_case_ = memo.get(_A )
if sub_memo is not None:
snake_case_ = sub_memo.get(_A )
if jumps is not None and len(_A ) > 0:
# find and make the largest jump without going over
snake_case_ = -1
for _k in range(len(_A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ = _k
break
if max_jump >= 0:
snake_case_ , snake_case_ , snake_case_ = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ = diff + c
for j in range(min(_A , len(_A ) ) ):
snake_case_ , snake_case_ = divmod(_A , 10 )
if new_c > 0:
add(_A , _A , _A )
else:
snake_case_ = []
else:
snake_case_ = {c: []}
snake_case_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_ , snake_case_ = next_term(_A , k - 1 , i + dn , _A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_ , snake_case_ = compute(_A , _A , i + dn , _A )
diff += _diff
dn += terms_jumped
snake_case_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ = 0
while j < len(_A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_A , (diff, dn, k) )
return (diff, dn)
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(_A ):
a_i.extend([0 for _ in range(k - len(_A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ = i
snake_case_ , snake_case_ , snake_case_ = 0, 0, 0
for j in range(len(_A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ = ds_c + ds_b
diff += addend
snake_case_ = 0
for j in range(_A ):
snake_case_ = a_i[j] + addend
snake_case_ , snake_case_ = divmod(_A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_A , _A , _A )
return diff, i - start_i
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
for j in range(_A , len(_A ) ):
snake_case_ = digits[j] + addend
if s >= 10:
snake_case_ , snake_case_ = divmod(_A , 10 )
snake_case_ = addend // 10 + quotient
else:
snake_case_ = s
snake_case_ = addend // 10
if addend == 0:
break
while addend > 0:
snake_case_ , snake_case_ = divmod(_A , 10 )
digits.append(_A )
def lowerCamelCase__ ( _A = 10**15 ):
'''simple docstring'''
snake_case_ = [1]
snake_case_ = 1
snake_case_ = 0
while True:
snake_case_ , snake_case_ = next_term(_A , 20 , i + dn , _A )
dn += terms_jumped
if dn == n - i:
break
snake_case_ = 0
for j in range(len(_A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 139
| 1
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
__magic_name__ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
__magic_name__ = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCAmelCase__( ):
__snake_case : Union[str, Any] = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
__snake_case : Optional[int] = bs[:]
__snake_case : Optional[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
__snake_case : str = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] ):
__snake_case : Optional[int] = set()
__snake_case : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case : Dict = char
return pairs
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
__snake_case : str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
__snake_case : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
__snake_case : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
__snake_case : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
__snake_case : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__snake_case : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__snake_case : Dict = json.load(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : List[Any] = errors # how to handle errors in decoding
__snake_case : Tuple = bytes_to_unicode()
__snake_case : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle:
__snake_case : int = merges_handle.read().split('\n' )[1:-1]
__snake_case : str = [tuple(merge.split() ) for merge in bpe_merges]
__snake_case : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__snake_case : Optional[int] = {}
__snake_case : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__snake_case : List[Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase_ ( self ):
return len(self.encoder )
def lowercase_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self , _UpperCAmelCase ):
if token in self.cache:
return self.cache[token]
__snake_case : int = tuple(_UpperCAmelCase )
__snake_case : List[Any] = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
__snake_case : Tuple = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case , __snake_case : str = bigram
__snake_case : str = []
__snake_case : str = 0
while i < len(_UpperCAmelCase ):
try:
__snake_case : Optional[int] = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case : Any = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case : str = tuple(_UpperCAmelCase )
__snake_case : List[str] = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
__snake_case : List[Any] = get_pairs(_UpperCAmelCase )
__snake_case : Tuple = ' '.join(_UpperCAmelCase )
__snake_case : int = word
return word
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Union[str, Any] = []
for token in re.findall(self.pat , _UpperCAmelCase ):
__snake_case : int = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(' ' ) )
return bpe_tokens
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[str] = ''.join(_UpperCAmelCase )
__snake_case : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__snake_case : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' )
__snake_case : Union[str, Any] = 0
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__snake_case : Optional[Any] = token_index
writer.write(' '.join(_UpperCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Optional[int] = [self.cls_token_id]
__snake_case : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = [self.sep_token_id]
__snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , **_UpperCAmelCase ):
__snake_case : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
__snake_case : Optional[int] = ' ' + text
return (text, kwargs)
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
__snake_case : str = super()._pad(
encoded_inputs=_UpperCAmelCase , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
__snake_case : int = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case : Tuple = len(encoded_inputs['global_attention_mask'] ) != len(_UpperCAmelCase )
if needs_to_be_padded:
__snake_case : Dict = len(_UpperCAmelCase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case : Optional[int] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case : List[str] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 576
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__magic_name__ = '''bart'''
__magic_name__ = True
@st.cache(allow_output_mutation=__UpperCAmelCase )
def UpperCAmelCase__( ):
if LOAD_DENSE_INDEX:
__snake_case : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__snake_case : List[Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__snake_case : Any = qar_model.eval()
else:
__snake_case , __snake_case : Optional[int] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : Tuple = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__snake_case : List[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__snake_case : List[str] = sas_model.eval()
else:
__snake_case , __snake_case : Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def UpperCAmelCase__( ):
if LOAD_DENSE_INDEX:
__snake_case : List[Any] = faiss.StandardGpuResources()
__snake_case : List[Any] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__snake_case : Dict = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , )
__snake_case : List[str] = faiss.IndexFlatIP(1_28 )
__snake_case : int = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase )
wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : str = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__snake_case : List[Any] = elia['train_eli5']
__snake_case : Dict = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) )
__snake_case : str = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__magic_name__ , __magic_name__ , __magic_name__ = load_indexes()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = load_models()
__magic_name__ , __magic_name__ = load_train_data()
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple=10 ):
__snake_case : str = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase )
__snake_case , __snake_case : Union[str, Any] = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[int] = [elia_train[int(__UpperCAmelCase )] for i in I[0]]
return nn_examples
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int="wiki40b" , __UpperCAmelCase : List[str]="dense" , __UpperCAmelCase : Any=10 ):
if source == "none":
__snake_case , __snake_case : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : int = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
__snake_case , __snake_case : Dict = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCAmelCase , )
__snake_case : Union[str, Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__snake_case : Any = 'question: {} context: {}'.format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCAmelCase : None),
} )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str=64 , __UpperCAmelCase : int=2_56 , __UpperCAmelCase : int=False , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Union[str, Any]=0.95 , __UpperCAmelCase : Tuple=0.8 ):
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=10_24 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__magic_name__ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__magic_name__ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__magic_name__ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__magic_name__ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__magic_name__ = st.sidebar.checkbox('''Demo options''')
if demo_options:
__magic_name__ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__magic_name__ = action_list.index(action_st)
__magic_name__ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__magic_name__ = show_type == '''Show full text of passages'''
else:
__magic_name__ = 3
__magic_name__ = True
__magic_name__ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__magic_name__ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__magic_name__ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__magic_name__ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__magic_name__ = '''wiki40b'''
__magic_name__ = '''dense'''
__magic_name__ = '''beam'''
__magic_name__ = 2
__magic_name__ = 64
__magic_name__ = 256
__magic_name__ = None
__magic_name__ = None
__magic_name__ = st.sidebar.checkbox('''Generation options''')
if generate_options:
__magic_name__ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__magic_name__ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__magic_name__ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__magic_name__ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__magic_name__ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__magic_name__ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__magic_name__ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__magic_name__ = None
# start main text
__magic_name__ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__magic_name__ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__magic_name__ = st.text_input('''Enter your question here:''', '''''')
else:
__magic_name__ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__magic_name__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__magic_name__ = support_list[:10]
__magic_name__ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__magic_name__ , __magic_name__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__magic_name__ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__magic_name__ = res[1].strip()
if sec_titles == "":
__magic_name__ = '''[{}]({})'''.format(res[0], wiki_url)
else:
__magic_name__ = sec_titles.split(''' & ''')
__magic_name__ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__magic_name__ = find_nearest_training(question)
__magic_name__ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__magic_name__ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__magic_name__ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 576
| 1
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( _lowerCAmelCase : int ) -> bool:
UpperCAmelCase : Dict = str(UpperCamelCase__ )
return len(UpperCamelCase__ ) == 9 and set(UpperCamelCase__ ) == set('''123456789''' )
def snake_case_ ( ) -> int | None:
for base_num in range(9999 , 4999 , -1 ):
UpperCAmelCase : List[Any] = 100002 * base_num
if is_9_pandigital(UpperCamelCase__ ):
return candidate
for base_num in range(333 , 99 , -1 ):
UpperCAmelCase : Tuple = 1002003 * base_num
if is_9_pandigital(UpperCamelCase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 720
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
UpperCamelCase__: Union[str, Any] = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def snake_case_ ( _lowerCAmelCase : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
UpperCAmelCase : Optional[int] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
UpperCAmelCase : Optional[Any] = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
UpperCAmelCase : int = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}")
| 528
| 0
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def lowerCAmelCase_ () -> int:
a_ : Optional[Any] = Github(os.environ["GITHUB_TOKEN"] )
a_ : Union[str, Any] = g.get_repo("huggingface/diffusers" )
a_ : Optional[int] = repo.get_issues(state="open" )
for issue in open_issues:
a_ : Optional[Any] = sorted(issue.get_comments() , key=lambda _SCREAMING_SNAKE_CASE : i.created_at , reverse=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = comments[0] if len(_SCREAMING_SNAKE_CASE ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 473
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self ) -> int:
a_ : Any = 1
a_ : str = 3
a_ : Dict = (3_2, 3_2)
a_ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
@property
def A ( self ) -> Tuple:
torch.manual_seed(0 )
a_ : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
return model
@property
def A ( self ) -> Any:
torch.manual_seed(0 )
a_ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A ( self ) -> List[Any]:
torch.manual_seed(0 )
a_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE )
@property
def A ( self ) -> Optional[int]:
def extract(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
a_ : Optional[int] = torch.ones([0] )
def A ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
self.pixel_values.to(_SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def A ( self ) -> Optional[Any]:
a_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = self.dummy_cond_unet
a_ : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
a_ : List[str] = self.dummy_vae
a_ : List[str] = self.dummy_text_encoder
a_ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a_ : Union[str, Any] = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : Optional[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = "A painting of a squirrel eating a burger"
a_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Union[str, Any] = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a_ : List[str] = output.images
a_ : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Dict = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ : Union[str, Any] = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ) -> Optional[Any]:
a_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ : List[str] = self.dummy_cond_unet
a_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
a_ : Any = self.dummy_vae
a_ : int = self.dummy_text_encoder
a_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a_ : str = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Dict = "A painting of a squirrel eating a burger"
a_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Union[str, Any] = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a_ : Union[str, Any] = output.images
a_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Optional[int] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
a_ : Any = image[0, -3:, -3:, -1]
a_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ) -> List[str]:
a_ : Tuple = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(pipe.scheduler , _SCREAMING_SNAKE_CASE )
assert pipe.safety_checker is None
a_ : List[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A ( self ) -> Union[str, Any]:
a_ : Tuple = self.dummy_cond_unet
a_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
a_ : Tuple = self.dummy_vae
a_ : Optional[Any] = self.dummy_text_encoder
a_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
a_ : Union[str, Any] = unet.half()
a_ : Optional[Any] = vae.half()
a_ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
a_ : Tuple = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : Optional[int] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : int = "A painting of a squirrel eating a burger"
a_ : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ) -> Optional[Any]:
a_ : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : str = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
a_ : Optional[int] = 4_0_0_3_6_6_0_3_4_6
a_ : Optional[int] = 7
# without safety guidance (sld_guidance_scale = 0)
a_ : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : Any = output.images
a_ : Any = image[0, -3:, -3:, -1]
a_ : List[str] = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
a_ : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : List[str] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : List[str] = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ) -> Dict:
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = "padme amidala taking a bath artwork, safe for work, no nudity"
a_ : List[Any] = 2_7_3_4_9_7_1_7_5_5
a_ : Tuple = 7
a_ : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Dict = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
a_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : str = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ) -> int:
a_ : Optional[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
a_ : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
a_ : List[str] = 1_0_4_4_3_5_5_2_3_4
a_ : Dict = 1_2
a_ : List[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Tuple = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : Any = output.images
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
a_ : Optional[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Optional[int] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : int = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : Tuple = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 473
| 1
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowercase = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def _A (UpperCamelCase : Any ) ->List[str]:
'''simple docstring'''
lowerCamelCase__ : List[str] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
_lowercase = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def _A (UpperCamelCase : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
lowerCamelCase__ : Dict = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCamelCase__ : Tuple = new_key.replace(UpperCamelCase , UpperCamelCase )
print(f"{key} -> {new_key}" )
lowerCamelCase__ : int = s_dict.pop(UpperCamelCase )
return s_dict
def _A (UpperCamelCase : List[Any] ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ : int = emb.weight.shape
lowerCamelCase__ : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
lowerCamelCase__ : List[str] = emb.weight.data
return lin_layer
def _A (UpperCamelCase : str , UpperCamelCase : str ) ->bytes:
'''simple docstring'''
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowerCamelCase__ : Any = os.path.basename(UpperCamelCase )
lowerCamelCase__ : int = url.split("""/""" )[-2]
lowerCamelCase__ : Optional[int] = os.path.join(UpperCamelCase , UpperCamelCase )
if os.path.exists(UpperCamelCase ) and not os.path.isfile(UpperCamelCase ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(UpperCamelCase ):
lowerCamelCase__ : Dict = open(UpperCamelCase , """rb""" ).read()
if hashlib.shaaaa(UpperCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(UpperCamelCase ) as source, open(UpperCamelCase , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=UpperCamelCase , unit_divisor=1024 ) as loop:
while True:
lowerCamelCase__ : Tuple = source.read(8192 )
if not buffer:
break
output.write(UpperCamelCase )
loop.update(len(UpperCamelCase ) )
lowerCamelCase__ : Any = open(UpperCamelCase , """rb""" ).read()
if hashlib.shaaaa(UpperCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def _A (UpperCamelCase : Dict , UpperCamelCase : Any ) ->Optional[Any]:
'''simple docstring'''
if ".pt" not in checkpoint_path:
lowerCamelCase__ : Optional[Any] = _download(_MODELS[checkpoint_path] )
else:
lowerCamelCase__ : Optional[Any] = torch.load(UpperCamelCase , map_location="""cpu""" )
lowerCamelCase__ : Optional[int] = original_checkpoint["""dims"""]
lowerCamelCase__ : int = original_checkpoint["""model_state_dict"""]
lowerCamelCase__ : Optional[int] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : str = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
lowerCamelCase__ : List[str] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=UpperCamelCase , decoder_ffn_dim=UpperCamelCase , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
lowerCamelCase__ : List[str] = WhisperForConditionalGeneration(UpperCamelCase )
lowerCamelCase__ : str = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCamelCase__ : int = proj_out_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowercase = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 703
|
from ... import PretrainedConfig
_lowercase = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __A ( A_ ):
UpperCamelCase :Optional[Any] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCamelCase :int = '''nezha'''
def __init__(self , __magic_name__=21128 , __magic_name__=768 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3072 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=64 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1E-12 , __magic_name__=0.1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Optional[Any] = max_relative_position
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = classifier_dropout
lowerCamelCase__ : Union[str, Any] = use_cache
| 96
| 0
|
import warnings
from .generation import TFGenerationMixin
class a ( _A ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , _A , )
| 144
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , __snake_case : Tuple , __snake_case : Tuple=7 , __snake_case : int=3 , __snake_case : List[str]=30 , __snake_case : Optional[Any]=4_00 , __snake_case : Dict=True , __snake_case : Optional[Any]=None , __snake_case : Dict=True , __snake_case : Any=[0.5, 0.5, 0.5] , __snake_case : Tuple=[0.5, 0.5, 0.5] , __snake_case : Any=True , __snake_case : str=1 / 2_55 , __snake_case : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
def lowerCamelCase_ ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self : int , __snake_case : Union[str, Any] , __snake_case : List[Any]=False ):
if not batched:
UpperCAmelCase_ = image_inputs[0]
if isinstance(__snake_case , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = self.size['''shortest_edge''']
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(__snake_case , key=lambda __snake_case : item[0] )[0]
UpperCAmelCase_ = max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Any = DetaImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = DetaImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_rescale''' ) )
self.assertTrue(hasattr(__snake_case , '''do_pad''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __snake_case )
def lowerCamelCase_ ( self : Optional[int] ):
pass
def lowerCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Union[str, Any] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Tuple ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
# prepare image and target
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
UpperCAmelCase_ = DetaImageProcessor()
UpperCAmelCase_ = image_processing(images=__snake_case , annotations=__snake_case , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __snake_case ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __snake_case , atol=1E-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __snake_case ) )
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __snake_case ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __snake_case ) )
# verify orig_size
UpperCAmelCase_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __snake_case ) )
# verify size
UpperCAmelCase_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __snake_case ) )
@slow
def lowerCamelCase_ ( self : int ):
# prepare image, target and masks_path
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
UpperCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase_ = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase_ = image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __snake_case ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __snake_case , atol=1E-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __snake_case ) )
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __snake_case ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __snake_case ) )
# verify masks
UpperCAmelCase_ = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __snake_case )
# verify orig_size
UpperCAmelCase_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __snake_case ) )
# verify size
UpperCAmelCase_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __snake_case ) )
| 144
| 1
|
"""simple docstring"""
from math import sqrt
def __a ( _lowercase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(_lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( _lowercase = 10001 ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : Union[str, Any] = 1
while count != nth and number < 3:
number += 1
if is_prime(_lowercase ):
count += 1
while count != nth:
number += 2
if is_prime(_lowercase ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 121
|
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def __a ( _lowercase , _lowercase , _lowercase=None , **_lowercase ):
"""simple docstring"""
lowerCamelCase__ : str = [x.strip() for x in open(_lowercase ).readlines()]
lowerCamelCase__ : str = [x.strip() for x in open(_lowercase ).readlines()][: len(_lowercase )]
lowerCamelCase__ : Optional[int] = calculate_rouge(_lowercase , _lowercase , **_lowercase )
if save_path is not None:
save_json(_lowercase , _lowercase , indent=_lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 121
| 1
|
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = " " ):
snake_case_ = []
snake_case_ = 0
for index, char in enumerate(a_ ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case_ = index + 1
elif index + 1 == len(a_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 39
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''wavlm'''
def __init__( self : Optional[int] , lowerCAmelCase__ : int=3_2 , lowerCAmelCase__ : Any=7_6_8 , lowerCAmelCase__ : Any=1_2 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : List[Any]=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-5 , lowerCAmelCase__ : Optional[Any]="group" , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__ : int=(1_0, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : str=1_2_8 , lowerCAmelCase__ : str=1_6 , lowerCAmelCase__ : Tuple=3_2_0 , lowerCAmelCase__ : Optional[int]=8_0_0 , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Union[str, Any]=0.05 , lowerCAmelCase__ : Union[str, Any]=1_0 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Union[str, Any]=1_0 , lowerCAmelCase__ : List[str]=3_2_0 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=1_0_0 , lowerCAmelCase__ : Union[str, Any]=2_5_6 , lowerCAmelCase__ : Tuple=2_5_6 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Any="mean" , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : Optional[int]=2_5_6 , lowerCAmelCase__ : List[str]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCAmelCase__ : str=(5, 3, 3, 1, 1) , lowerCAmelCase__ : Optional[Any]=(1, 2, 3, 1, 1) , lowerCAmelCase__ : List[str]=5_1_2 , lowerCAmelCase__ : Tuple=8_0 , lowerCAmelCase__ : List[Any]=0 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Dict , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : int = feat_extract_norm
_UpperCAmelCase : Optional[int] = feat_extract_activation
_UpperCAmelCase : List[str] = list(lowerCAmelCase__ )
_UpperCAmelCase : Any = list(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = list(lowerCAmelCase__ )
_UpperCAmelCase : int = conv_bias
_UpperCAmelCase : Union[str, Any] = num_buckets
_UpperCAmelCase : Dict = max_bucket_distance
_UpperCAmelCase : int = num_conv_pos_embeddings
_UpperCAmelCase : str = num_conv_pos_embedding_groups
_UpperCAmelCase : Optional[Any] = len(self.conv_dim )
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = hidden_dropout
_UpperCAmelCase : Optional[Any] = attention_dropout
_UpperCAmelCase : List[Any] = activation_dropout
_UpperCAmelCase : int = feat_proj_dropout
_UpperCAmelCase : List[str] = final_dropout
_UpperCAmelCase : Optional[Any] = layerdrop
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : List[Any] = num_ctc_classes
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[Any] = do_stable_layer_norm
_UpperCAmelCase : str = use_weighted_layer_sum
_UpperCAmelCase : Optional[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : str = apply_spec_augment
_UpperCAmelCase : List[str] = mask_time_prob
_UpperCAmelCase : Tuple = mask_time_length
_UpperCAmelCase : Optional[int] = mask_time_min_masks
_UpperCAmelCase : str = mask_feature_prob
_UpperCAmelCase : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase : List[Any] = num_codevectors_per_group
_UpperCAmelCase : Tuple = num_codevector_groups
_UpperCAmelCase : Optional[Any] = contrastive_logits_temperature
_UpperCAmelCase : Optional[int] = num_negatives
_UpperCAmelCase : List[Any] = codevector_dim
_UpperCAmelCase : int = proj_codevector_dim
_UpperCAmelCase : Optional[Any] = diversity_loss_weight
# ctc loss
_UpperCAmelCase : Dict = ctc_loss_reduction
_UpperCAmelCase : int = ctc_zero_infinity
# adapter
_UpperCAmelCase : Optional[Any] = add_adapter
_UpperCAmelCase : Tuple = adapter_kernel_size
_UpperCAmelCase : str = adapter_stride
_UpperCAmelCase : Optional[Any] = num_adapter_layers
_UpperCAmelCase : Any = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase : Union[str, Any] = list(lowerCAmelCase__ )
_UpperCAmelCase : str = list(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = list(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = xvector_output_dim
@property
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 494
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] =WavaVecaPhonemeCTCTokenizer
lowerCamelCase : Dict =False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : Optional[int] = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowerCAmelCase : str = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
__lowerCAmelCase : Optional[Any] = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + """\n""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : str=20 , lowerCAmelCase : Union[str, Any]=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowerCAmelCase : int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase )) for i in range(len(lowerCAmelCase ) )]
__lowerCAmelCase : Dict = list(filter(lambda lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCAmelCase ) , lowerCAmelCase ) )
if max_length is not None and len(lowerCAmelCase ) > max_length:
__lowerCAmelCase : int = toks[:max_length]
if min_length is not None and len(lowerCAmelCase ) < min_length and len(lowerCAmelCase ) > 0:
while len(lowerCAmelCase ) < min_length:
__lowerCAmelCase : int = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCAmelCase : Dict = [t[0] for t in toks]
# Ensure consistency
__lowerCAmelCase : List[Any] = tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
if " " not in output_txt and len(lowerCAmelCase ) > 1:
__lowerCAmelCase : Optional[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase )
)
if with_prefix_space:
__lowerCAmelCase : Any = """ """ + output_txt
__lowerCAmelCase : List[str] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
return output_txt, output_ids
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowerCAmelCase : int = tokenizer("""m xxx ɪ""" , do_phonemize=lowerCAmelCase ).input_ids
self.assertEqual(lowerCAmelCase , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowerCAmelCase : Optional[Any] = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowerCAmelCase ).input_ids
self.assertEqual(lowerCAmelCase , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
__lowerCAmelCase : int = tokenizer("""maɪ c""" , do_phonemize=lowerCAmelCase ).input_ids
self.assertEqual(lowerCAmelCase , [3, 2_00] ) # mai should be <unk> (=3)
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowerCAmelCase : Tuple = """Hello how are you"""
__lowerCAmelCase : List[str] = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowerCAmelCase : str = """Hello how are you"""
__lowerCAmelCase : int = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase ).input_ids , tokenizer(lowerCAmelCase , do_phonemize=lowerCAmelCase ).input_ids )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowerCAmelCase : int = """Hello how are you"""
__lowerCAmelCase : Optional[Any] = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
__lowerCAmelCase : Dict = tokenizer.decode(tokenizer(lowerCAmelCase ).input_ids )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowerCAmelCase : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowerCAmelCase : List[Any] = tokenizer.decode(sample_ids[0] )
__lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , batch_tokens[0] )
self.assertEqual(lowerCAmelCase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
__lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowerCAmelCase : List[Any] = """Hello how are you"""
__lowerCAmelCase : List[str] = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowerCAmelCase : int = """Hello how are you"""
__lowerCAmelCase : Optional[int] = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase ).input_ids , tokenizer(lowerCAmelCase , do_phonemize=lowerCAmelCase ).input_ids )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowerCAmelCase : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowerCAmelCase : Any = tokenizer.decode(sample_ids[0] )
__lowerCAmelCase : Tuple = tokenizer.batch_decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , batch_tokens[0] )
self.assertEqual(lowerCAmelCase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowerCAmelCase : Optional[int] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase )
__lowerCAmelCase : Dict = tokenizer.batch_decode(lowerCAmelCase , filter_word_delimiter_token=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , batch_tokens[0] )
self.assertEqual(lowerCAmelCase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowerCAmelCase : Any = """Hello how are you"""
__lowerCAmelCase : str = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
__lowerCAmelCase : List[str] = tokenizer.decode(tokenizer(lowerCAmelCase ).input_ids , filter_word_delimiter_token=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowerCAmelCase : int = """Hello how are you"""
__lowerCAmelCase : Optional[int] = tokenizer.phonemize(lowerCAmelCase , phonemizer_lang="""en-us""" )
__lowerCAmelCase : Tuple = tokenizer.decode(tokenizer(lowerCAmelCase ).input_ids , filter_word_delimiter_token=lowerCAmelCase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowerCAmelCase )
__lowerCAmelCase : List[Any] = """Hello how are you"""
__lowerCAmelCase : int = tokenizer(lowerCAmelCase , phonemizer_lang="""en-us""" ).input_ids
__lowerCAmelCase : str = tokenizer(lowerCAmelCase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Tuple = tokenizer.decode(lowerCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowerCAmelCase , """ɛ l o h aʊ a ʁ j u""" )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowerCAmelCase : Union[str, Any] = """Hello how Are you"""
__lowerCAmelCase : int = """hello how are you"""
__lowerCAmelCase : List[str] = tokenizer(lowerCAmelCase ).input_ids
__lowerCAmelCase : Dict = tokenizer(lowerCAmelCase ).input_ids
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowerCAmelCase : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
__lowerCAmelCase : Tuple = tokenizer.batch_decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def SCREAMING_SNAKE_CASE ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = [d[key] for d in offsets]
return retrieved_list
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowerCAmelCase : str = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowerCAmelCase : List[Any] = tokenizer.decode(lowerCAmelCase , output_char_offsets=lowerCAmelCase , filter_word_delimiter_token=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : str = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] ):
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(isinstance(outputs_list[0] , lowerCAmelCase ) )
# transform list to ModelOutput
__lowerCAmelCase : str = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
[recursive_check(lowerCAmelCase , lowerCAmelCase ) for la, la in zip(lowerCAmelCase , lowerCAmelCase )]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowerCAmelCase : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowerCAmelCase : List[str] = tokenizer.batch_decode(lowerCAmelCase , output_char_offsets=lowerCAmelCase )
__lowerCAmelCase : Dict = [tokenizer.decode(lowerCAmelCase , output_char_offsets=lowerCAmelCase ) for ids in sample_ids]
check_list_tuples_equal(lowerCAmelCase , lowerCAmelCase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : int = tokenizer.vocab_size
__lowerCAmelCase : int = len(lowerCAmelCase )
self.assertNotEqual(lowerCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCAmelCase : int = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowerCAmelCase : str = tokenizer.add_tokens(lowerCAmelCase )
__lowerCAmelCase : int = tokenizer.vocab_size
__lowerCAmelCase : Union[str, Any] = len(lowerCAmelCase )
self.assertNotEqual(lowerCAmelCase , 0 )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , len(lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , all_size + len(lowerCAmelCase ) )
__lowerCAmelCase : int = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowerCAmelCase )
self.assertGreaterEqual(len(lowerCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowerCAmelCase : Optional[Any] = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowerCAmelCase : Optional[Any] = tokenizer.add_special_tokens(lowerCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.vocab_size
__lowerCAmelCase : Dict = len(lowerCAmelCase )
self.assertNotEqual(lowerCAmelCase , 0 )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , len(lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , all_size_a + len(lowerCAmelCase ) )
__lowerCAmelCase : str = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowerCAmelCase )
self.assertGreaterEqual(len(lowerCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.get_tokenizers(fast=lowerCAmelCase , do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : Union[str, Any] = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_string(lowerCAmelCase )
self.assertIsInstance(output["""text"""] , lowerCAmelCase )
| 218
|
def snake_case_ (__A : int = 1_0**9 ) -> int:
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Dict = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__lowerCAmelCase : int = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
| 218
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : Any = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase : str = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCamelCase : Optional[Any] = dict(zip(A__ ,range(len(A__))))
__lowerCamelCase : str = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCamelCase : Optional[Any] = {'''unk_token''': '''<unk>'''}
__lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'])
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file ,'w' ,encoding='utf-8') as fp:
fp.write(json.dumps(A__) + '\n')
with open(self.merges_file ,'w' ,encoding='utf-8') as fp:
fp.write('\n'.join(A__))
__lowerCamelCase : Tuple = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
__lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname ,A__)
with open(self.image_processor_file ,'w' ,encoding='utf-8') as fp:
json.dump(A__ ,A__)
def lowerCAmelCase ( self : Any ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,pad_token='!' ,**A__)
def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,pad_token='!' ,**A__)
def lowerCAmelCase ( self : Tuple ,**SCREAMING_SNAKE_CASE__ : Tuple):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname ,**A__)
def lowerCAmelCase ( self : Optional[int]):
shutil.rmtree(self.tmpdirname)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Tuple = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta)]
__lowerCamelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(A__ ,0 ,-1)) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Optional[Any] = self.get_tokenizer()
__lowerCamelCase : List[str] = self.get_rust_tokenizer()
__lowerCamelCase : str = self.get_image_processor()
__lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
processor_slow.save_pretrained(self.tmpdirname)
__lowerCamelCase : Any = OwlViTProcessor.from_pretrained(self.tmpdirname ,use_fast=A__)
__lowerCamelCase : List[str] = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
processor_fast.save_pretrained(self.tmpdirname)
__lowerCamelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer ,A__)
self.assertIsInstance(processor_fast.tokenizer ,A__)
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor ,A__)
self.assertIsInstance(processor_fast.image_processor ,A__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__lowerCamelCase : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)')
__lowerCamelCase : List[str] = self.get_image_processor(do_normalize=A__)
__lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=A__)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer ,A__)
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor ,A__)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Dict = self.get_image_processor()
__lowerCamelCase : int = self.get_tokenizer()
__lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
__lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCamelCase : Optional[int] = image_processor(A__ ,return_tensors='np')
__lowerCamelCase : Optional[int] = processor(images=A__ ,return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2)
def lowerCAmelCase ( self : Any):
__lowerCamelCase : str = self.get_image_processor()
__lowerCamelCase : Union[str, Any] = self.get_tokenizer()
__lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
__lowerCamelCase : Tuple = '''lower newer'''
__lowerCamelCase : Optional[Any] = processor(text=A__ ,return_tensors='np')
__lowerCamelCase : Union[str, Any] = tokenizer(A__ ,return_tensors='np')
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() ,encoded_processor[key][0].tolist())
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Tuple = self.get_image_processor()
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
__lowerCamelCase : Tuple = '''lower newer'''
__lowerCamelCase : str = self.prepare_image_inputs()
__lowerCamelCase : Any = processor(text=A__ ,images=A__)
self.assertListEqual(list(inputs.keys()) ,['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(A__):
processor()
def lowerCAmelCase ( self : int):
__lowerCamelCase : Optional[Any] = '''google/owlvit-base-patch32'''
__lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(A__)
__lowerCamelCase : List[str] = ['''cat''', '''nasa badge''']
__lowerCamelCase : Tuple = processor(text=A__)
__lowerCamelCase : Dict = 1_6
self.assertListEqual(list(inputs.keys()) ,['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape ,(2, seq_length))
# test if it raises when no input is passed
with pytest.raises(A__):
processor()
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[Any] = '''google/owlvit-base-patch32'''
__lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(A__)
__lowerCamelCase : str = [['''cat''', '''nasa badge'''], ['''person''']]
__lowerCamelCase : Tuple = processor(text=A__)
__lowerCamelCase : Any = 1_6
__lowerCamelCase : Tuple = len(A__)
__lowerCamelCase : str = max([len(A__) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) ,['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape ,(batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(A__):
processor()
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : str = '''google/owlvit-base-patch32'''
__lowerCamelCase : List[str] = OwlViTProcessor.from_pretrained(A__)
__lowerCamelCase : str = ['''cat''', '''nasa badge''']
__lowerCamelCase : Any = processor(text=A__)
__lowerCamelCase : Any = 1_6
__lowerCamelCase : List[str] = inputs['''input_ids''']
__lowerCamelCase : List[str] = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) ,['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape ,(2, seq_length))
self.assertListEqual(list(input_ids[0]) ,predicted_ids[0])
self.assertListEqual(list(input_ids[1]) ,predicted_ids[1])
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : List[str] = self.get_image_processor()
__lowerCamelCase : Tuple = self.get_tokenizer()
__lowerCamelCase : Any = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
__lowerCamelCase : Tuple = self.prepare_image_inputs()
__lowerCamelCase : Optional[int] = self.prepare_image_inputs()
__lowerCamelCase : int = processor(images=A__ ,query_images=A__)
self.assertListEqual(list(inputs.keys()) ,['query_pixel_values', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(A__):
processor()
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = self.get_image_processor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : int = OwlViTProcessor(tokenizer=A__ ,image_processor=A__)
__lowerCamelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase : str = processor.batch_decode(A__)
__lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(A__)
self.assertListEqual(A__ ,A__)
| 652
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DistilBertTokenizer
def __init__( self : str , A__ : Optional[Any]=None , A__ : Any=None , A__ : Tuple=True , A__ : List[Any]="[UNK]" , A__ : List[str]="[SEP]" , A__ : Tuple="[PAD]" , A__ : Optional[int]="[CLS]" , A__ : Union[str, Any]="[MASK]" , A__ : List[str]=True , A__ : Any=None , **A__ : int , ) -> str:
'''simple docstring'''
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
a__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A__ ) != tokenize_chinese_chars
):
a__ : int = getattr(A__ , normalizer_state.pop('''type''' ) )
a__ : List[Any] = do_lower_case
a__ : str = strip_accents
a__ : List[str] = tokenize_chinese_chars
a__ : Dict = normalizer_class(**A__ )
a__ : List[Any] = do_lower_case
def __lowerCAmelCase ( self : Tuple , A__ : List[str] , A__ : Dict=None ) -> List[str]:
'''simple docstring'''
a__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : str , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
| 688
| 0
|
import numpy as np
def snake_case_ (_a : np.ndarray , _a : float ):
return np.where(vector > 0 , _a , (alpha * (np.exp(_a ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _a ( unittest.TestCase ):
def __init__( self : List[Any] , lowercase : Dict , lowercase : List[str]=13 , lowercase : str=7 , lowercase : List[str]=True , lowercase : List[str]=True , lowercase : Optional[Any]=True , lowercase : Optional[Any]=True , lowercase : Any=99 , lowercase : Any=32 , lowercase : Any=5 , lowercase : Tuple=4 , lowercase : List[Any]=37 , lowercase : List[Any]="gelu" , lowercase : int=0.1 , lowercase : Any=0.1 , lowercase : Optional[int]=512 , lowercase : List[str]=16 , lowercase : Union[str, Any]=2 , lowercase : int=0.02 , lowercase : int=4 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_choices
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_attention_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = True
UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _a ( __a , unittest.TestCase ):
__a : Any = True
__a : str = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def A ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowercase )
UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
@require_flax
class _a ( unittest.TestCase ):
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowercase )
UpperCAmelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
UpperCAmelCase = model(lowercase )[0]
UpperCAmelCase = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , lowercase )
# compare the actual values for a slice.
UpperCAmelCase = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowercase )
UpperCAmelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
UpperCAmelCase = model(lowercase )[0]
# compare the actual values for a slice.
UpperCAmelCase = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
| 358
| 0
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21
|
'''simple docstring'''
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =[0] * len(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
# use last results for better performance - dynamic programming
_UpperCamelCase =prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_UpperCamelCase =prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_UpperCamelCase =j
return prefix_result
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return max(prefix_function(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 404
| 0
|
"""simple docstring"""
def lowerCamelCase__ ( __snake_case = 1_00_00_00 ) -> int:
"""simple docstring"""
_UpperCamelCase = limit + 1
_UpperCamelCase = [0] * limit
for first_term in range(1, __snake_case ):
for n in range(__snake_case, __snake_case, __snake_case ):
_UpperCamelCase = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
_UpperCamelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 703
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
_a = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
_a = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
_a = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
_a = F"""down_blocks.{i}.resnets.{j}."""
_a = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
_a = F"""down_blocks.{i}.attentions.{j}."""
_a = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
_a = F"""up_blocks.{i}.resnets.{j}."""
_a = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
_a = F"""up_blocks.{i}.attentions.{j}."""
_a = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
_a = F"""down_blocks.{i}.downsamplers.0.conv."""
_a = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
_a = F"""up_blocks.{i}.upsamplers.0."""
_a = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
_a = """mid_block.attentions.0."""
_a = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
_a = F"""mid_block.resnets.{j}."""
_a = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
_UpperCamelCase = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
_UpperCamelCase = v.replace(__snake_case, __snake_case )
_UpperCamelCase = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
_UpperCamelCase = v.replace(__snake_case, __snake_case )
_UpperCamelCase = v
_UpperCamelCase = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
_a = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_a = F"""encoder.down_blocks.{i}.resnets.{j}."""
_a = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_a = F"""down_blocks.{i}.downsamplers.0."""
_a = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_a = F"""up_blocks.{i}.upsamplers.0."""
_a = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_a = F"""decoder.up_blocks.{i}.resnets.{j}."""
_a = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_a = F"""mid_block.resnets.{i}."""
_a = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
_a = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
return w.reshape(*w.shape, 1, 1 )
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
_UpperCamelCase = v.replace(__snake_case, __snake_case )
_UpperCamelCase = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
_UpperCamelCase = v.replace(__snake_case, __snake_case )
_UpperCamelCase = v
_UpperCamelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
_UpperCamelCase = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F'''mid.attn_1.{weight_name}.weight''' in k:
print(F'''Reshaping {k} for SD format''' )
_UpperCamelCase = reshape_weight_for_sd(__snake_case )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
_a = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
_a = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_a = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_a = {"""q""": 0, """k""": 1, """v""": 2}
def lowerCamelCase__ ( __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
_UpperCamelCase = k[: -len('''.q_proj.weight''' )]
_UpperCamelCase = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
_UpperCamelCase = [None, None, None]
_UpperCamelCase = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
_UpperCamelCase = k[: -len('''.q_proj.bias''' )]
_UpperCamelCase = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
_UpperCamelCase = [None, None, None]
_UpperCamelCase = v
continue
_UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case )
_UpperCamelCase = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
_UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case )
_UpperCamelCase = torch.cat(__snake_case )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
_UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case )
_UpperCamelCase = torch.cat(__snake_case )
return new_state_dict
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
_a = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_a = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
_a = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
_a = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
_a = load_file(unet_path, device="""cpu""")
else:
_a = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
_a = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
_a = load_file(vae_path, device="""cpu""")
else:
_a = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
_a = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
_a = load_file(text_enc_path, device="""cpu""")
else:
_a = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
_a = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
_a = convert_unet_state_dict(unet_state_dict)
_a = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_a = convert_vae_state_dict(vae_state_dict)
_a = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
_a = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_a = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
_a = convert_text_enc_state_dict_vaa(text_enc_dict)
_a = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
_a = convert_text_enc_state_dict(text_enc_dict)
_a = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_a = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_a = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_a = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 78
| 0
|
import os
import numpy
import onnx
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Union[str, Any] = a.name
A_ : List[str] = b.name
A_ : Any = ""
A_ : List[str] = ""
A_ : Dict = a == b
A_ : Optional[int] = name_a
A_ : str = name_b
return res
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ) -> Any:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_lowerCAmelCase , _lowerCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCAmelCase , _lowerCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , _lowerCAmelCase , _lowerCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ) -> Optional[int]:
for n in graph_proto.node:
_node_replace_input_with(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> Optional[Any]:
A_ : Tuple = list(model.graph.initializer )
A_ : Optional[int] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
A_ : str = inits[i].name
A_ : List[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : str ) -> Tuple:
A_ : Dict = os.path.dirname(_lowerCAmelCase )
A_ : Optional[int] = os.path.basename(_lowerCAmelCase )
A_ : List[Any] = onnx.load(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
A_ : List[Any] = list(model.graph.initializer )
A_ : Union[str, Any] = set()
A_ : Optional[Any] = {}
A_ : Optional[Any] = []
A_ : Any = 0
for i in range(len(_lowerCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_lowerCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_lowerCAmelCase )
dup_set.add(_lowerCAmelCase )
A_ : Union[str, Any] = inits[j].data_type
A_ : Dict = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , _lowerCAmelCase )
total_reduced_size += mem_size
A_ : List[str] = inits[i].name
A_ : List[Any] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_lowerCAmelCase )
else:
A_ : Tuple = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
A_ : Optional[Any] = sorted(_lowerCAmelCase )
_remove_dup_initializers_from_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A_ : Any = "optimized_" + model_file_name
A_ : List[Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
onnx.save(_lowerCAmelCase , _lowerCAmelCase )
return new_model
| 454
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def lowercase ( _lowerCAmelCase ):
UpperCAmelCase__ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
UpperCAmelCase__ = [144, 192, 240]
UpperCAmelCase__ = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
UpperCAmelCase__ = [96, 120, 144]
UpperCAmelCase__ = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
UpperCAmelCase__ = [64, 80, 96]
UpperCAmelCase__ = [16, 16, 24, 48, 64, 80, 320]
UpperCAmelCase__ = 0.05
UpperCAmelCase__ = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
UpperCAmelCase__ = 512
UpperCAmelCase__ = 16
UpperCAmelCase__ = 21
UpperCAmelCase__ = """pascal-voc-id2label.json"""
else:
UpperCAmelCase__ = 1000
UpperCAmelCase__ = """imagenet-1k-id2label.json"""
UpperCAmelCase__ = """huggingface/label-files"""
UpperCAmelCase__ = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase__ = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
return config
def lowercase ( _lowerCAmelCase , _lowerCAmelCase=False ):
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
UpperCAmelCase__ = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
UpperCAmelCase__ = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
UpperCAmelCase__ = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
UpperCAmelCase__ = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
UpperCAmelCase__ = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
UpperCAmelCase__ = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
UpperCAmelCase__ = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
UpperCAmelCase__ = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
UpperCAmelCase__ = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
UpperCAmelCase__ = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
UpperCAmelCase__ = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
UpperCAmelCase__ = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
UpperCAmelCase__ = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
UpperCAmelCase__ = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
UpperCAmelCase__ = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
UpperCAmelCase__ = name.replace(F'''.global_rep.{i}.weight''' , """.layernorm.weight""" )
if F'''.global_rep.{i}.bias''' in name:
UpperCAmelCase__ = name.replace(F'''.global_rep.{i}.bias''' , """.layernorm.bias""" )
if ".global_rep." in name:
UpperCAmelCase__ = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
UpperCAmelCase__ = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
UpperCAmelCase__ = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
UpperCAmelCase__ = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
UpperCAmelCase__ = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
UpperCAmelCase__ = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
UpperCAmelCase__ = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
UpperCAmelCase__ = """mobilevit.""" + name
return name
def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
if base_model:
UpperCAmelCase__ = """"""
else:
UpperCAmelCase__ = """mobilevit."""
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(_lowerCAmelCase )
if key[:8] == "encoder.":
UpperCAmelCase__ = key[8:]
if "qkv" in key:
UpperCAmelCase__ = key.split(""".""" )
UpperCAmelCase__ = int(key_split[0][6:] ) - 1
UpperCAmelCase__ = int(key_split[3] )
UpperCAmelCase__ = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
UpperCAmelCase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
UpperCAmelCase__ = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = val
return orig_state_dict
def lowercase ( ):
UpperCAmelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
UpperCAmelCase__ = get_mobilevit_config(_lowerCAmelCase )
# load original state_dict
UpperCAmelCase__ = torch.load(_lowerCAmelCase , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
UpperCAmelCase__ = MobileViTForSemanticSegmentation(_lowerCAmelCase ).eval()
else:
UpperCAmelCase__ = MobileViTForImageClassification(_lowerCAmelCase ).eval()
UpperCAmelCase__ = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase__ = model(**_lowerCAmelCase )
UpperCAmelCase__ = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
UpperCAmelCase__ = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
UpperCAmelCase__ = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
UpperCAmelCase__ = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
UpperCAmelCase__ = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
UpperCAmelCase__ = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
UpperCAmelCase__ = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
UpperCAmelCase__ = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
UpperCAmelCase__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowerCAmelCase , organization="""apple""" )
model.push_to_hub(_lowerCAmelCase , organization="""apple""" )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 392
| 0
|
from collections import Counter
from timeit import timeit
def lowerCAmelCase_ ( lowerCamelCase = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def lowerCAmelCase_ ( lowerCamelCase = "" ):
if len(lowerCamelCase ) == 0:
return True
__magic_name__ : Union[str, Any] =input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__magic_name__ : dict[str, int] ={}
for character in lower_case_input_str:
__magic_name__ : int =character_freq_dict.get(lowerCamelCase , 0 ) + 1
__magic_name__ : List[Any] =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCAmelCase_ ( lowerCamelCase = "" ):
print("""\nFor string = """ , lowerCamelCase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowerCamelCase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase_ : Union[str, Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 719
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase_ : str = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 367
| 0
|
"""simple docstring"""
def a ( __snake_case : list ):
'''simple docstring'''
if len(__snake_case ) <= 1:
return lst
UpperCAmelCase_ :Union[str, Any] = 1
while i < len(__snake_case ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCAmelCase_ ,UpperCAmelCase_ :Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCAmelCase_ :str = 1
return lst
if __name__ == "__main__":
__lowerCamelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCamelCase = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 608
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class _snake_case :
'''simple docstring'''
def __init__( self : Dict , snake_case : int , snake_case : MutableSequence[float] ):
if len(snake_case ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
UpperCAmelCase_ :list[float] = list(snake_case )
UpperCAmelCase_ :str = degree
def __add__( self : Any , snake_case : Polynomial ):
if self.degree > polynomial_a.degree:
UpperCAmelCase_ :int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , snake_case )
else:
UpperCAmelCase_ :Any = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , snake_case )
def __sub__( self : List[str] , snake_case : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Any , snake_case : Polynomial ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , snake_case )
def snake_case_ ( self : Optional[Any] , snake_case : int | float ):
UpperCAmelCase_ :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[Any] ):
UpperCAmelCase_ :List[str] = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(snake_case )
return polynomial
def __repr__( self : int ):
return self.__str__()
def snake_case_ ( self : str ):
UpperCAmelCase_ :list[float] = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase_ :str = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , snake_case )
def snake_case_ ( self : Optional[int] , snake_case : int | float = 0 ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + 2)
UpperCAmelCase_ :List[str] = constant
for i in range(self.degree + 1 ):
UpperCAmelCase_ :Optional[int] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , snake_case )
def __eq__( self : int , snake_case : object ):
if not isinstance(snake_case , snake_case ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[Any] , snake_case : object ):
return not self.__eq__(snake_case )
| 608
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCamelCase__ :
def __init__( self : Optional[Any] , lowercase__ : str , lowercase__ : List[Any]=13 , lowercase__ : List[str]=7 , lowercase__ : Optional[Any]=False , lowercase__ : List[Any]=True , lowercase__ : Tuple=False , lowercase__ : Any=True , lowercase__ : int=33 , lowercase__ : Any=32 , lowercase__ : int=5 , lowercase__ : int=4 , lowercase__ : Optional[int]=37 , lowercase__ : Union[str, Any]="gelu" , lowercase__ : Optional[int]=0.1 , lowercase__ : str=0.1 , lowercase__ : Optional[Any]=5_12 , lowercase__ : List[Any]=16 , lowercase__ : Any=2 , lowercase__ : int=0.0_2 , lowercase__ : str=3 , lowercase__ : List[Any]=4 , lowercase__ : List[str]=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : int ):
_lowerCAmelCase = EsmModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ )
_lowerCAmelCase = model(lowercase__ )
_lowerCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : int ):
_lowerCAmelCase = EsmForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Tuple , lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Dict , lowercase__ : str ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = EsmForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ =False
UpperCamelCase__ =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ =()
UpperCamelCase__ =(
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ =True
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = EsmModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = EsmModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()[0]
_lowerCAmelCase = EsmEmbeddings(config=lowercase__ )
_lowerCAmelCase = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_lowerCAmelCase = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_lowerCAmelCase = create_position_ids_from_input_ids(lowercase__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase__ , lowercase__ ) ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()[0]
_lowerCAmelCase = EsmEmbeddings(config=lowercase__ )
_lowerCAmelCase = torch.empty(2 , 4 , 30 )
_lowerCAmelCase = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_lowerCAmelCase = torch.as_tensor([expected_single_positions, expected_single_positions] )
_lowerCAmelCase = embeddings.create_position_ids_from_inputs_embeds(lowercase__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase__ , lowercase__ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
pass
@require_torch
class lowerCamelCase__ ( UpperCAmelCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with torch.no_grad():
_lowerCAmelCase = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(lowercase__ )[0]
_lowerCAmelCase = 33
_lowerCAmelCase = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , lowercase__ )
_lowerCAmelCase = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
with torch.no_grad():
_lowerCAmelCase = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_lowerCAmelCase = model(lowercase__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4 ) )
| 225
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_lowerCAmelCase = {
'do_resize': True,
'size': {'height': 2_24, 'width': 2_24},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
'do_convert_rgb': True,
}
_lowerCAmelCase = os.path.join(self.tmpdirname , lowercase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **lowercase__ : Any ):
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **lowercase__ : List[Any] ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **lowercase__ : Tuple ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_lowerCAmelCase = [Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__ )
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase__ )
self.assertIsInstance(processor_fast.tokenizer , lowercase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase__ )
self.assertIsInstance(processor_fast.image_processor , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
_lowerCAmelCase = self.get_image_processor(do_normalize=lowercase__ )
_lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=lowercase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(lowercase__ , return_tensors='np' )
_lowerCAmelCase = processor(images=lowercase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
_lowerCAmelCase = 'Alexandra,T-shirt的价格是15便士。'
_lowerCAmelCase = processor(text=lowercase__ )
_lowerCAmelCase = tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
_lowerCAmelCase = 'Alexandra,T-shirt的价格是15便士。'
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowercase__ ):
processor()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(lowercase__ )
_lowerCAmelCase = tokenizer.batch_decode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
_lowerCAmelCase = 'Alexandra,T-shirt的价格是15便士。'
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 225
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class A__(a_ ):
"""simple docstring"""
_A : List[str] = '''vit_msn'''
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.0_2 , _lowercase=1e-06 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , **_lowercase , ) -> Optional[int]:
super().__init__(**_lowercase )
a_ : Optional[int] = hidden_size
a_ : Any = num_hidden_layers
a_ : Optional[int] = num_attention_heads
a_ : Optional[Any] = intermediate_size
a_ : List[Any] = hidden_act
a_ : int = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : int = initializer_range
a_ : Union[str, Any] = layer_norm_eps
a_ : Union[str, Any] = image_size
a_ : Any = patch_size
a_ : Dict = num_channels
a_ : List[Any] = qkv_bias
| 540
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowercase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(_lowercase , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(_lowercase , """num_encoder_blocks""" ) )
class A__:
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=64 , _lowercase=3 , _lowercase=4 , _lowercase=[2, 2, 2, 2] , _lowercase=[8, 4, 2, 1] , _lowercase=[16, 32, 64, 128] , _lowercase=[1, 4, 8, 16] , _lowercase=[1, 2, 4, 8] , _lowercase=True , _lowercase=True , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0_2 , _lowercase=3 , _lowercase=None , ) -> Any:
a_ : List[str] = parent
a_ : int = batch_size
a_ : Dict = image_size
a_ : Any = num_channels
a_ : Optional[int] = num_encoder_blocks
a_ : Optional[Any] = sr_ratios
a_ : List[str] = depths
a_ : int = hidden_sizes
a_ : List[Any] = downsampling_rates
a_ : List[Any] = num_attention_heads
a_ : Any = is_training
a_ : List[Any] = use_labels
a_ : List[Any] = hidden_act
a_ : Dict = hidden_dropout_prob
a_ : Any = attention_probs_dropout_prob
a_ : List[str] = initializer_range
a_ : Dict = num_labels
a_ : Union[str, Any] = scope
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : int = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a_ : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> Any:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
a_ : Optional[Any] = SegformerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : List[str] = model(_lowercase )
a_ : int = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
a_ : Any = self.num_labels
a_ : Union[str, Any] = SegformerForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
a_ : str = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
a_ : Optional[Any] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
a_ : int = 1
a_ : Union[str, Any] = SegformerForSemanticSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : int = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_lowercase )
a_ : Optional[int] = model(_lowercase , labels=_lowercase )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Union[str, Any] = self.prepare_config_and_inputs()
a_ , a_ , a_ : Optional[int] = config_and_inputs
a_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__(a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : Dict = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_A : List[Any] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Optional[int] = True
_A : Optional[Any] = False
_A : int = False
_A : Optional[int] = False
def UpperCamelCase__ ( self ) -> Dict:
a_ : str = SegformerModelTester(self )
a_ : Optional[int] = SegformerConfigTester(self , config_class=_lowercase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase__ ( self ) -> int:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_lowercase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
def UpperCamelCase__ ( self ) -> Dict:
a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : int = model_class(_lowercase )
a_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Dict = [*signature.parameters.keys()]
a_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Optional[int] = True
for model_class in self.all_model_classes:
a_ : List[Any] = True
a_ : Optional[Any] = False
a_ : Tuple = True
a_ : Any = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
a_ : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) )
a_ : Union[str, Any] = outputs.attentions
a_ : Optional[int] = sum(self.model_tester.depths )
self.assertEqual(len(_lowercase ) , _lowercase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a_ : Dict = True
a_ : List[str] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
a_ : int = model(**self._prepare_for_class(_lowercase , _lowercase ) )
a_ : Optional[Any] = outputs.attentions
self.assertEqual(len(_lowercase ) , _lowercase )
# verify the first attentions (first block, first layer)
a_ : List[str] = (self.model_tester.image_size // 4) ** 2
a_ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a_ : int = (self.model_tester.image_size // 32) ** 2
a_ : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a_ : Dict = len(_lowercase )
# Check attention is always last and order is fine
a_ : List[str] = True
a_ : List[Any] = True
a_ : int = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
a_ : int = model(**self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + 1 , len(_lowercase ) )
a_ : Optional[int] = outputs.attentions
self.assertEqual(len(_lowercase ) , _lowercase )
# verify the first attentions (first block, first layer)
a_ : int = (self.model_tester.image_size // 4) ** 2
a_ : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase__ ( self ) -> Optional[int]:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
a_ : Tuple = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
a_ : Tuple = model(**self._prepare_for_class(_lowercase , _lowercase ) )
a_ : Optional[Any] = outputs.hidden_states
a_ : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(_lowercase ) , _lowercase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowercase ):
continue
a_ : Union[str, Any] = model_class(_lowercase )
model.to(_lowercase )
model.train()
a_ : List[str] = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
a_ : List[Any] = model(**_lowercase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Union[str, Any] = SegformerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
class A__(unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> List[str]:
# only resize + normalize
a_ : Tuple = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase )
a_ : Dict = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
_lowercase )
a_ : str = prepare_img()
a_ : str = image_processor(images=_lowercase , return_tensors="""pt""" )
a_ : List[str] = encoded_inputs.pixel_values.to(_lowercase )
with torch.no_grad():
a_ : Union[str, Any] = model(_lowercase )
a_ : Tuple = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _lowercase )
a_ : Any = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowercase , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
# only resize + normalize
a_ : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase )
a_ : int = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(_lowercase )
a_ : Any = prepare_img()
a_ : List[Any] = image_processor(images=_lowercase , return_tensors="""pt""" )
a_ : Optional[int] = encoded_inputs.pixel_values.to(_lowercase )
with torch.no_grad():
a_ : List[str] = model(_lowercase )
a_ : Tuple = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _lowercase )
a_ : Any = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowercase , atol=1e-1 ) )
@slow
def UpperCamelCase__ ( self ) -> List[str]:
# only resize + normalize
a_ : Tuple = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase )
a_ : str = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
_lowercase )
a_ : int = prepare_img()
a_ : Any = image_processor(images=_lowercase , return_tensors="""pt""" )
a_ : Optional[int] = encoded_inputs.pixel_values.to(_lowercase )
with torch.no_grad():
a_ : Any = model(_lowercase )
a_ : str = outputs.logits.detach().cpu()
a_ : int = image_processor.post_process_semantic_segmentation(outputs=_lowercase , target_sizes=[(500, 300)] )
a_ : Any = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowercase )
a_ : Tuple = image_processor.post_process_semantic_segmentation(outputs=_lowercase )
a_ : Optional[Any] = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _lowercase )
| 540
| 1
|
import math
def A_ ( snake_case : Optional[Any] ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A_ ( snake_case : Union[str, Any] = 10001 ) -> int:
'''simple docstring'''
try:
__UpperCamelCase = int(__SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
__UpperCamelCase = []
__UpperCamelCase = 2
while len(__SCREAMING_SNAKE_CASE ) < nth:
if is_prime(__SCREAMING_SNAKE_CASE ):
primes.append(__SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(__SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(F"{solution() = }")
| 701
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'table-transformer'
_snake_case = ['past_key_values']
_snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_="resnet50" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , **SCREAMING_SNAKE_CASE_ , )-> Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = backbone_config.get('''model_type''' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# set timm attributes to None
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None, None, None
__UpperCamelCase = use_timm_backbone
__UpperCamelCase = backbone_config
__UpperCamelCase = num_channels
__UpperCamelCase = num_queries
__UpperCamelCase = d_model
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = encoder_layers
__UpperCamelCase = auxiliary_loss
__UpperCamelCase = position_embedding_type
__UpperCamelCase = backbone
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = dilation
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = mask_loss_coefficient
__UpperCamelCase = dice_loss_coefficient
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = version.parse('1.11' )
@property
def A__ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self )-> float:
'''simple docstring'''
return 1E-5
@property
def A__ ( self )-> int:
'''simple docstring'''
return 12
| 451
| 0
|
'''simple docstring'''
def A_( A : int):
UpperCamelCase , UpperCamelCase = [], []
while len(A) > 1:
UpperCamelCase , UpperCamelCase = min(A), max(A)
start.append(A)
end.append(A)
collection.remove(A)
collection.remove(A)
end.reverse()
return start + collection + end
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase : Dict = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 3
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : Tuple =logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case__ ):
_a : List[str] = ['pixel_values']
def __init__( self : List[Any] , lowerCamelCase : bool = True , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : bool = True , lowerCamelCase : Union[int, float] = 1 / 2_55 , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : bool = True , **lowerCamelCase : Optional[Any] , ):
super().__init__(**lowerCamelCase )
lowerCamelCase_ : List[Any] = size if size is not None else {'height': 3_84, 'width': 3_84}
lowerCamelCase_ : List[str] = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
lowerCamelCase_ : Dict = do_resize
lowerCamelCase_ : int = size
lowerCamelCase_ : Union[str, Any] = resample
lowerCamelCase_ : Tuple = do_rescale
lowerCamelCase_ : Dict = rescale_factor
lowerCamelCase_ : int = do_normalize
lowerCamelCase_ : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ : int = do_convert_rgb
def __a ( self : Tuple , lowerCamelCase : np.ndarray , lowerCamelCase : Dict[str, int] , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : List[Any] , ):
lowerCamelCase_ : List[Any] = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
lowerCamelCase_ : Optional[Any] = (size['height'], size['width'])
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __a ( self : Union[str, Any] , lowerCamelCase : np.ndarray , lowerCamelCase : Union[int, float] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Union[str, Any] , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __a ( self : str , lowerCamelCase : np.ndarray , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Optional[int] , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __a ( self : List[Any] , lowerCamelCase : ImageInput , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Dict[str, int]] = None , lowerCamelCase : PILImageResampling = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[float] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : bool = None , lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase : Any , ):
lowerCamelCase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ : int = resample if resample is not None else self.resample
lowerCamelCase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ : str = image_std if image_std is not None else self.image_std
lowerCamelCase_ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ : Union[str, Any] = size if size is not None else self.size
lowerCamelCase_ : Optional[int] = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
lowerCamelCase_ : Dict = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ : int = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ : List[Any] = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
lowerCamelCase_ : Optional[int] = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
lowerCamelCase_ : Dict = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
lowerCamelCase_ : Optional[int] = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
lowerCamelCase_ : Optional[int] = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
lowerCamelCase_ : Optional[int] = BatchFeature(data={'pixel_values': images} , tensor_type=lowerCamelCase )
return encoded_outputs
| 364
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class lowerCamelCase ( _lowerCamelCase ):
'''simple docstring'''
UpperCamelCase__ ='''rwkv'''
UpperCamelCase__ ={'''max_position_embeddings''': '''context_length'''}
def __init__( self : Union[str, Any] , lowerCamelCase_ : Any=50277 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : str=32 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[str]=1E-5 , lowerCamelCase_ : str=0 , lowerCamelCase_ : Optional[int]=0 , lowerCamelCase_ : List[Any]=6 , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=True , **lowerCamelCase_ : Optional[int] , ) -> Tuple:
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : Any = context_length
__magic_name__ : Tuple = hidden_size
__magic_name__ : Any = num_hidden_layers
__magic_name__ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__magic_name__ : List[str] = intermediate_size if intermediate_size is not None else 4 * hidden_size
__magic_name__ : str = layer_norm_epsilon
__magic_name__ : List[Any] = rescale_every
__magic_name__ : Any = use_cache
__magic_name__ : str = bos_token_id
__magic_name__ : Tuple = eos_token_id
super().__init__(
tie_word_embeddings=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
| 501
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Dict[Optional[str], Type[Formatter]] = {}
__lowerCamelCase : Dict[Optional[str], str] = {}
__lowerCamelCase : Dict[Optional[str], Exception] = {}
def lowercase__ ( __A: type ,__A: Optional[str] ,__A: Optional[List[str]] = None ,):
'''simple docstring'''
__magic_name__ : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__magic_name__ : Tuple = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__magic_name__ : Optional[int] = format_type
def lowercase__ ( __A: Exception ,__A: Optional[str] ,__A: Optional[List[str]] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__magic_name__ : List[str] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__lowerCamelCase : str = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__lowerCamelCase : int = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__lowerCamelCase : str = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def lowercase__ ( __A: Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowercase__ ( __A: Optional[str] ,**__A: Any ):
'''simple docstring'''
__magic_name__ : Tuple = get_format_type_from_alias(__A )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__A )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 501
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =split_dict._to_yaml_list()
assert len(lowercase__ ) == len(lowercase__ )
UpperCAmelCase_ =SplitDict._from_yaml_list(lowercase__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
UpperCAmelCase_ =None
# the split name of split_dict takes over the name of the split info object
UpperCAmelCase_ =split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=lowercase__ ), SplitInfo(dataset_name="my_dataset" )] )
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 54
|
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
lowercase__ = f'Open Kight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
| 0
|
'''simple docstring'''
import math
_A = 1_0
_A = 7
_A = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ = 20 ):
lowercase_ : int = math.comb(lowerCamelCase_ , lowerCamelCase_ )
lowercase_ : List[str] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase_ )
lowercase_ : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
| 713
|
'''simple docstring'''
import math
import os
import sys
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[str] = ''
try:
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as binary_file:
lowercase_ : Dict = binary_file.read()
for dat in data:
lowercase_ : Union[str, Any] = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lexicon.pop(SCREAMING_SNAKE_CASE_ )
lowercase_ : Any = last_match_id
if math.loga(SCREAMING_SNAKE_CASE_ ).is_integer():
for curr_key in lexicon:
lowercase_ : Tuple = '0' + lexicon[curr_key]
lowercase_ : Any = bin(SCREAMING_SNAKE_CASE_ )[2:]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : Dict = {'0': '0', '1': '1'}
lowercase_ ,lowercase_ : Any = '', ''
lowercase_ : List[Any] = len(SCREAMING_SNAKE_CASE_ )
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase_ : str = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
index += 1
lowercase_ : List[Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
lowercase_ : Optional[Any] = lexicon[curr_string]
result += last_match_id
return result
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[str] = os.path.getsize(SCREAMING_SNAKE_CASE_ )
lowercase_ : Any = bin(SCREAMING_SNAKE_CASE_ )[2:]
lowercase_ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
return "0" * (length_length - 1) + file_length_binary + compressed
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[str] = 8
try:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as opened_file:
lowercase_ : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(SCREAMING_SNAKE_CASE_ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[str] = read_file_binary(SCREAMING_SNAKE_CASE_ )
lowercase_ : List[str] = compress_data(SCREAMING_SNAKE_CASE_ )
lowercase_ : Union[str, Any] = add_file_length(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
write_file_binary(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 438
| 0
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
def get_masked_lm_array(__lowerCAmelCase ):
snake_case__ = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
def get_encoder_array(__lowerCAmelCase ):
snake_case__ = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
def get_encoder_layer_array(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
def get_encoder_attention_layer_array(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = array.reshape(__lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
print(F"""Loading model based on config from {config_path}...""" )
snake_case__ = BertConfig.from_json_file(__lowerCAmelCase )
snake_case__ = BertForMaskedLM(__lowerCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
snake_case__ = model.bert.encoder.layer[layer_index]
# Self-attention
snake_case__ = layer.attention.self
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
snake_case__ = layer.attention.output
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_attention_layer_norm/gamma''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_attention_layer_norm/beta''' )
# Intermediate
snake_case__ = layer.intermediate
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_intermediate_dense/kernel''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_intermediate_dense/bias''' )
# Output
snake_case__ = layer.output
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_dense/kernel''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_dense/bias''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_layer_norm/gamma''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_layer_norm/beta''' )
# Embeddings
snake_case__ = get_encoder_array('''_position_embedding_layer/embeddings''' )
snake_case__ = get_encoder_array('''_type_embedding_layer/embeddings''' )
snake_case__ = get_encoder_array('''_embedding_norm_layer/gamma''' )
snake_case__ = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
snake_case__ = model.cls.predictions.transform
snake_case__ = get_masked_lm_array('''dense/kernel''' )
snake_case__ = get_masked_lm_array('''dense/bias''' )
snake_case__ = get_masked_lm_array('''layer_norm/gamma''' )
snake_case__ = get_masked_lm_array('''layer_norm/beta''' )
snake_case__ = get_masked_lm_array('''embedding_table''' )
# Pooling
snake_case__ = BertPooler(config=__lowerCAmelCase )
snake_case__ = get_encoder_array('''_pooler_layer/kernel''' )
snake_case__ = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(__lowerCAmelCase )
# Integration test - should load without any errors ;)
snake_case__ = BertForMaskedLM.from_pretrained(__lowerCAmelCase )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
lowerCamelCase__ : int = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 33
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ) -> Any:
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCAmelCase ( self ) -> int:
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase_ , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_A = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self ) -> str:
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
assert image.shape[0] == 2
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_A = unet.half()
_A = text_encoder.half()
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = torch.manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , ).images
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCAmelCase ( self ) -> List[str]:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , output_type="""np""" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 401
| 0
|
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def lowercase__ ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[str] , lowerCAmelCase__ : Optional[str] ):
'''simple docstring'''
a__ : Any = to_pil_image(lowerCAmelCase__ )
a__ : Optional[int] = pil_image.size
a__ : List[Any] = pytesseract.image_to_data(lowerCAmelCase__ , lang=lowerCAmelCase__ , output_type="dict" , config=lowerCAmelCase__ )
a__ : List[str] = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
a__ : Dict = [idx for idx, word in enumerate(lowerCAmelCase__ ) if not word.strip()]
a__ : List[str] = [word for idx, word in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices]
a__ : Optional[int] = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices]
a__ : Optional[int] = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices]
a__ : List[str] = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices]
a__ : Tuple = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
a__ : Optional[int] = []
for x, y, w, h in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Dict = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase__ )
# finally, normalize the bounding boxes
a__ : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __UpperCAmelCase ( _UpperCamelCase ):
__lowerCamelCase : str = ["pixel_values"]
def __init__( self : int , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : float = 1 / 2_55 , a_ : bool = True , a_ : Union[float, Iterable[float]] = None , a_ : Union[float, Iterable[float]] = None , a_ : bool = True , a_ : Optional[str] = None , a_ : Optional[str] = "" , **a_ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**a_ )
a__ : Optional[Any] = size if size is not None else {"height": 2_24, "width": 2_24}
a__ : Union[str, Any] = get_size_dict(a_ )
a__ : int = do_resize
a__ : List[str] = size
a__ : int = resample
a__ : Union[str, Any] = do_rescale
a__ : Tuple = rescale_value
a__ : Dict = do_normalize
a__ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
a__ : Dict = apply_ocr
a__ : Any = ocr_lang
a__ : Tuple = tesseract_config
def UpperCAmelCase ( self : Optional[int] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ) -> np.ndarray:
'''simple docstring'''
a__ : Any = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
a__ : Any = (size["height"], size["width"])
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def UpperCAmelCase ( self : List[str] , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ) -> np.ndarray:
'''simple docstring'''
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def UpperCAmelCase ( self : str , a_ : np.ndarray , a_ : Union[float, Iterable[float]] , a_ : Union[float, Iterable[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def UpperCAmelCase ( self : str , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : int=None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Union[float, Iterable[float]] = None , a_ : Union[float, Iterable[float]] = None , a_ : bool = None , a_ : Optional[str] = None , a_ : Optional[str] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : str , ) -> PIL.Image.Image:
'''simple docstring'''
a__ : Tuple = do_resize if do_resize is not None else self.do_resize
a__ : Optional[int] = size if size is not None else self.size
a__ : Tuple = get_size_dict(a_ )
a__ : List[str] = resample if resample is not None else self.resample
a__ : str = do_rescale if do_rescale is not None else self.do_rescale
a__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
a__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
a__ : Optional[int] = image_std if image_std is not None else self.image_std
a__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
a__ : Optional[int] = ocr_lang if ocr_lang is not None else self.ocr_lang
a__ : Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
a__ : List[Any] = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
a__ : Tuple = [to_numpy_array(a_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
a__ : List[Any] = []
a__ : Any = []
for image in images:
a__ : int = apply_tesseract(a_ , a_ , a_ )
words_batch.append(a_ )
boxes_batch.append(a_ )
if do_resize:
a__ : Dict = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_rescale:
a__ : Dict = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
a__ : Optional[int] = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
a__ : Optional[Any] = [to_channel_dimension_format(a_ , a_ ) for image in images]
a__ : int = BatchFeature(data={"pixel_values": images} , tensor_type=a_ )
if apply_ocr:
a__ : Tuple = words_batch
a__ : List[str] = boxes_batch
return data
| 712
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __UpperCAmelCase ( _UpperCamelCase ):
__lowerCamelCase : str = "bart"
__lowerCamelCase : Dict = ["past_key_values"]
__lowerCamelCase : List[str] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , a_ : Union[str, Any]=5_02_65 , a_ : List[str]=10_24 , a_ : str=12 , a_ : Union[str, Any]=40_96 , a_ : Tuple=16 , a_ : List[str]=12 , a_ : int=40_96 , a_ : Tuple=16 , a_ : int=0.0 , a_ : Optional[int]=0.0 , a_ : Dict="gelu" , a_ : Optional[int]=10_24 , a_ : Tuple=0.1 , a_ : str=0.0 , a_ : str=0.0 , a_ : Optional[Any]=0.02 , a_ : Any=0.0 , a_ : int=False , a_ : Dict=True , a_ : List[Any]=3 , a_ : Tuple=1 , a_ : Optional[Any]=0 , a_ : Any=2 , a_ : List[Any]=True , a_ : Dict=2 , a_ : List[str]=2 , **a_ : Dict , ) -> Dict:
'''simple docstring'''
a__ : List[Any] = vocab_size
a__ : Dict = max_position_embeddings
a__ : Optional[Any] = d_model
a__ : Optional[Any] = encoder_ffn_dim
a__ : Union[str, Any] = encoder_layers
a__ : Union[str, Any] = encoder_attention_heads
a__ : Tuple = decoder_ffn_dim
a__ : Union[str, Any] = decoder_layers
a__ : Union[str, Any] = decoder_attention_heads
a__ : Optional[Any] = dropout
a__ : str = attention_dropout
a__ : Dict = activation_dropout
a__ : List[Any] = activation_function
a__ : Dict = init_std
a__ : Dict = encoder_layerdrop
a__ : List[Any] = decoder_layerdrop
a__ : List[Any] = classifier_dropout
a__ : Union[str, Any] = use_cache
a__ : Any = encoder_layers
a__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , forced_eos_token_id=a_ , **a_ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , a_ ):
a__ : List[Any] = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed." )
class __UpperCAmelCase ( _UpperCamelCase ):
@property
def UpperCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
a__ : Optional[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
a__ : Optional[int] = {0: "batch"}
a__ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
a__ : Dict = {0: "batch", 1: "decoder_sequence"}
a__ : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a_ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
a__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
a__ , a__ : Any = self.num_layers
for i in range(a_ ):
a__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
a__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
a__ : List[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
a__ : int = super().outputs
else:
a__ : Tuple = super(a_ , self ).outputs
if self.use_past:
a__ , a__ : List[str] = self.num_layers
for i in range(a_ ):
a__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
a__ : List[str] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def UpperCAmelCase ( self : Dict , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , a_ , a_ , a_ , a_ )
# Generate decoder inputs
a__ : Optional[Any] = seq_length if not self.use_past else 1
a__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , a_ , a_ , a_ , a_ )
a__ : Union[str, Any] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
a__ : str = dict(**a_ , **a_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a__ , a__ : Union[str, Any] = common_inputs["input_ids"].shape
a__ : List[Any] = common_inputs["decoder_input_ids"].shape[1]
a__ , a__ : Tuple = self.num_attention_heads
a__ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a__ : List[str] = decoder_seq_length + 3
a__ : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a__ : List[Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(a_ , a_ )] , dim=1 )
a__ : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a__ , a__ : Any = self.num_layers
a__ : Dict = min(a_ , a_ )
a__ : Optional[int] = max(a_ , a_ ) - min_num_layers
a__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(a_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(a_ ),
torch.zeros(a_ ),
torch.zeros(a_ ),
torch.zeros(a_ ),
) )
# TODO: test this.
a__ : List[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(a_ , a_ ):
common_inputs["past_key_values"].append((torch.zeros(a_ ), torch.zeros(a_ )) )
return common_inputs
def UpperCAmelCase ( self : Optional[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , a_ , a_ , a_ , a_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a__ , a__ : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
a__ : Any = seqlen + 2
a__ , a__ : Any = self.num_layers
a__ , a__ : List[Any] = self.num_attention_heads
a__ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a__ : str = common_inputs["attention_mask"].dtype
a__ : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
a__ : Optional[Any] = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(a_ )
]
return common_inputs
def UpperCAmelCase ( self : List[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : Any = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a__ : Any = tokenizer.num_special_tokens_to_add(a_ )
a__ : Dict = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
a__ : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
a__ : Any = dict(tokenizer(a_ , return_tensors=a_ ) )
return common_inputs
def UpperCAmelCase ( self : List[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
a__ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
elif self.task == "causal-lm":
a__ : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
else:
a__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
return common_inputs
def UpperCAmelCase ( self : Optional[Any] , a_ : int , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : List[Any] ) -> str:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
a__ : int = super()._flatten_past_key_values_(a_ , a_ , a_ , a_ )
else:
a__ : Union[str, Any] = super(a_ , self )._flatten_past_key_values_(
a_ , a_ , a_ , a_ )
| 251
| 0
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = AutoencoderKL
UpperCamelCase_ : Any = '''sample'''
UpperCamelCase_ : int = 1e-2
@property
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Tuple = 4
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Optional[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def _A ( self : Any ):
return (3, 32, 32)
@property
def _A ( self : str ):
return (3, 32, 32)
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
def _A ( self : Optional[int] ):
pass
def _A ( self : Union[str, Any] ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _A ( self : Optional[int] ):
# enable deterministic behavior for gradient checkpointing
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_class(**UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE : List[Any] = model(**UpperCAmelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.randn_like(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE : Any = self.model_class(**UpperCAmelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCAmelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE : List[str] = model_a(**UpperCAmelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE : Optional[int] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
SCREAMING_SNAKE_CASE : Any = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE : Any = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
SCREAMING_SNAKE_CASE : Optional[int] = model.to(UpperCAmelCase_ )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE : Any = image.to(UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ , generator=UpperCAmelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-2 ) )
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ):
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase_ ) for s in shape] )}.npy'''
def _A ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Tuple , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Dict=(4, 3, 512, 512) , UpperCAmelCase_ : Any=False ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase_ , UpperCAmelCase_ ) ) ).to(UpperCAmelCase_ ).to(UpperCAmelCase_ )
return image
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Any="CompVis/stable-diffusion-v1-4" , UpperCAmelCase_ : List[str]=False ):
SCREAMING_SNAKE_CASE : int = "fp16" if fpaa else None
SCREAMING_SNAKE_CASE : List[str] = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : Any = AutoencoderKL.from_pretrained(
UpperCAmelCase_ , subfolder="vae" , torch_dtype=UpperCAmelCase_ , revision=UpperCAmelCase_ , )
model.to(UpperCAmelCase_ ).eval()
return model
def _A ( self : Optional[Any] , UpperCAmelCase_ : int=0 ):
if torch_device == "mps":
return torch.manual_seed(UpperCAmelCase_ )
return torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def _A ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_image(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_generator(UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def _A ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(UpperCAmelCase_ , fpaa=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = self.get_generator(UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : str = torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def _A ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Tuple = self.get_sd_image(UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase_ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : str = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : str = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def _A ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : Dict = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Any = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE : int = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _A ( self : Optional[int] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Dict = self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model.decode(UpperCAmelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model.decode(UpperCAmelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def _A ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_image(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_generator(UpperCAmelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model.encode(UpperCAmelCase_ ).latent_dist
SCREAMING_SNAKE_CASE : List[str] = dist.sample(generator=UpperCAmelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE : List[str] = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ )
| 62
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 108
| 0
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
snake_case_ = 'bert-base-cased'
snake_case_ = 'fp16'
snake_case_ = 'bf16'
snake_case_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : Any ):
"""simple docstring"""
super().setUp()
__snake_case = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def a (self : str ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_UpperCAmelCase ):
__snake_case = self.dist_env.copy()
__snake_case = f"""{i + 1}"""
__snake_case = strategy
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def a (self : List[str] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_UpperCAmelCase ):
__snake_case = self.dist_env.copy()
__snake_case = prefetch_policy
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def a (self : Optional[Any] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_UpperCAmelCase ):
__snake_case = self.dist_env.copy()
__snake_case = state_dict_type
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = AutoModel.from_pretrained(_UpperCAmelCase )
for policy in FSDP_AUTO_WRAP_POLICY:
__snake_case = self.dist_env.copy()
__snake_case = policy
if policy == "TRANSFORMER_BASED_WRAP":
__snake_case = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
__snake_case = '''2000'''
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_UpperCAmelCase )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__snake_case = self.dist_env.copy()
__snake_case = '''TRANSFORMER_BASED_WRAP'''
__snake_case = '''T5Layer'''
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
with self.assertRaises(_UpperCAmelCase ) as cm:
fsdp_plugin.set_auto_wrap_policy(_UpperCAmelCase )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
__snake_case = self.dist_env.copy()
__snake_case = '''SIZE_BASED_WRAP'''
__snake_case = '''0'''
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_UpperCAmelCase )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def a (self : List[Any] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__snake_case = self.dist_env.copy()
__snake_case = mp_dtype
with mockenv_context(**_UpperCAmelCase ):
__snake_case = Accelerator()
if mp_dtype == "fp16":
__snake_case = torch.floataa
elif mp_dtype == "bf16":
__snake_case = torch.bfloataa
__snake_case = MixedPrecision(param_dtype=_UpperCAmelCase , reduce_dtype=_UpperCAmelCase , buffer_dtype=_UpperCAmelCase )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _UpperCAmelCase )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _UpperCAmelCase ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_UpperCAmelCase )
def a (self : Optional[Any] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__snake_case = self.dist_env.copy()
__snake_case = str(_UpperCAmelCase ).lower()
with mockenv_context(**_UpperCAmelCase ):
__snake_case = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_UpperCAmelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : List[Any] ):
"""simple docstring"""
super().setUp()
__snake_case = 0.8_2
__snake_case = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
__snake_case = {
'''multi_gpu_fp16''': 3200,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__snake_case = 160
__snake_case = 160
__snake_case = inspect.getfile(accelerate.test_utils )
__snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
__snake_case = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
__snake_case = cmd.copy()
for i, strategy in enumerate(_UpperCAmelCase ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
def a (self : Any ):
"""simple docstring"""
__snake_case = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
__snake_case = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(_UpperCAmelCase ):
__snake_case = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
__snake_case = len(_UpperCAmelCase )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__snake_case = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
__snake_case = cmd_config[:-1]
__snake_case = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
def a (self : Dict ):
"""simple docstring"""
__snake_case = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
__snake_case = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__snake_case = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(_UpperCAmelCase ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
| 714
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = 'new-model'
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Optional[Any] = NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''bert-base-cased'''
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''bert-base-cased'''
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForPreTraining.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : int ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForCausalLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForCausalLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : int ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : str ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForSequenceClassification.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : str ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForQuestionAnswering.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
@require_tensorflow_probability
def a (self : List[str] ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(
a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(a__ , a__ )
__snake_case = copy.deepcopy(model.config )
__snake_case = ['''FunnelBaseModel''']
__snake_case = TFAutoModel.from_config(a__ )
self.assertIsInstance(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a__ )
__snake_case = TFAutoModel.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def a (self : str ):
"""simple docstring"""
try:
AutoConfig.register('''new-model''' , a__ )
__snake_case = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(a__ ):
auto_class.register(a__ , a__ )
auto_class.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
auto_class.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = BertModelTester(self ).get_config()
__snake_case = NewModelConfig(**tiny_config.to_dict() )
__snake_case = auto_class.from_config(a__ )
self.assertIsInstance(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a__ )
__snake_case = auto_class.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a (self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__snake_case = TFAutoModel.from_pretrained('''bert-base''' )
def a (self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__snake_case = TFAutoModel.from_pretrained(a__ , revision='''aaaaaa''' )
def a (self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def a (self : int ):
"""simple docstring"""
with self.assertRaisesRegex(a__ , '''Use `from_pt=True` to load this model''' ):
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 388
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE:str = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE:str = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
self.assertTrue(isinstance(dc.token_ids ,SCREAMING_SNAKE_CASE__ ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self : Any ):
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE:str = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ ) # fails here
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:Dict = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE:Optional[Any] = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = dc.update(1 )
SCREAMING_SNAKE_CASE:Any = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Union[str, Any] = dc.update(2 )
SCREAMING_SNAKE_CASE:Tuple = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[Any] = dc.update(3 )
SCREAMING_SNAKE_CASE:int = stepped is True and completed is True and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE:Tuple = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 143
|
'''simple docstring'''
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:str = [1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[str] = 0, 0, 0
SCREAMING_SNAKE_CASE:List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE:Union[str, Any] = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE:Optional[Any] = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
SCREAMING_SNAKE_CASE:int = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE:Dict = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE:int = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE:Optional[Any] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_00) = }''')
| 143
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "gpt_neo"
UpperCAmelCase_ = ["past_key_values"]
UpperCAmelCase_ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : List[Any], _UpperCAmelCase : List[Any]=5_0_2_5_7, _UpperCAmelCase : Dict=2_0_4_8, _UpperCAmelCase : int=2_0_4_8, _UpperCAmelCase : List[str]=2_4, _UpperCAmelCase : Dict=[[["global", "local"], 1_2]], _UpperCAmelCase : Optional[Any]=1_6, _UpperCAmelCase : List[str]=None, _UpperCAmelCase : Union[str, Any]=2_5_6, _UpperCAmelCase : Any="gelu_new", _UpperCAmelCase : Tuple=0.0, _UpperCAmelCase : str=0.0, _UpperCAmelCase : List[Any]=0.0, _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Union[str, Any]=1E-5, _UpperCAmelCase : Dict=0.02, _UpperCAmelCase : Dict=True, _UpperCAmelCase : int=5_0_2_5_6, _UpperCAmelCase : List[str]=5_0_2_5_6, **_UpperCAmelCase : Dict, ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = window_size
SCREAMING_SNAKE_CASE__ : int = activation_function
SCREAMING_SNAKE_CASE__ : Optional[int] = resid_dropout
SCREAMING_SNAKE_CASE__ : Any = embed_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = classifier_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = use_cache
SCREAMING_SNAKE_CASE__ : Any = bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = eos_token_id
SCREAMING_SNAKE_CASE__ : int = attention_types
SCREAMING_SNAKE_CASE__ : List[Any] = self.expand_attention_types_params(_UpperCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
@staticmethod
def A_ ( _UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE__ : Optional[int] = input.size()
SCREAMING_SNAKE_CASE__ : str = len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = shape[dimension]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = torch.div(sizedim - size , SCREAMING_SNAKE_CASE__ , rounding_mode="floor" ) + 1
SCREAMING_SNAKE_CASE__ : Any = torch.arange(SCREAMING_SNAKE_CASE__ ) + low_indices[:min_length][:, None]
SCREAMING_SNAKE_CASE__ : int = [slice(SCREAMING_SNAKE_CASE__ )] * rank
SCREAMING_SNAKE_CASE__ : Optional[int] = indices
SCREAMING_SNAKE_CASE__ : List[Any] = input[s]
SCREAMING_SNAKE_CASE__ : Any = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE__ : int = torch.arange(1 , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = torch.remainder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = remainders == 0
SCREAMING_SNAKE_CASE__ : int = candidates[divisor_indices]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(SCREAMING_SNAKE_CASE__ )
return largest_divisor, torch.div(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rounding_mode="floor" )
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
@property
def A_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase, direction="inputs" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
return self._config.num_heads
def A_ ( self : List[Any], _UpperCAmelCase : PreTrainedTokenizer, _UpperCAmelCase : int = -1, _UpperCAmelCase : int = -1, _UpperCAmelCase : bool = False, _UpperCAmelCase : Optional[TensorType] = None, ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super(_UpperCAmelCase, self ).generate_dummy_inputs(
_UpperCAmelCase, batch_size=_UpperCAmelCase, seq_length=_UpperCAmelCase, is_pair=_UpperCAmelCase, framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE__ : int = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Dict = seqlen + 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE__ : Dict = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE__ : Tuple = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_UpperCAmelCase, _UpperCAmelCase, dtype=_UpperCAmelCase )], dim=1 )
return ordered_inputs
@property
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 1_3
| 157
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : Optional[int] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "lm_head"
SCREAMING_SNAKE_CASE__ : List[str] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE__ : str = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE__ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Dict = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE__ : str = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : str = name.split(SCREAMING_SNAKE_CASE__ )[0].split("." )[-2]
SCREAMING_SNAKE_CASE__ : List[str] = mapped_key.replace("*" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : str = "weight"
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE__ : Any = name.split("." )
SCREAMING_SNAKE_CASE__ : List[str] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=True ) -> List[str]:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE__ : str = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : str = UniSpeechConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : List[Any] = Dictionary.load_from_json(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ : str = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : str = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : int = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : List[str] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : int = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ : Dict = 42
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 43
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True if config.feat_extract_norm == "layer" else False
SCREAMING_SNAKE_CASE__ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : str = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = UniSpeechForCTC(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE__ : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 157
| 1
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_snake_case : Optional[int] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , *lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 81
|
'''simple docstring'''
import math
def lowercase__ ( __lowercase : int ) -> int:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ):
__UpperCamelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(__lowercase )
if number < 1:
__UpperCamelCase = F'''Input value of [number={number}] must be > 0'''
raise ValueError(__lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , __lowercase ):
for _ in range(__lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
a__ : int =0
try:
a__ : str =proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 399
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase ( _lowerCamelCase ):
def __init__( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Union[str, Any] = True , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : str = False , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Optional[int] = True , lowerCamelCase_ : int = "arrow" , **lowerCamelCase_ : Optional[Any] , ) -> Optional[int]:
super().__init__(
split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , **UpperCamelCase_ , )
__magic_name__ : str = load_from_cache_file
__magic_name__ : Dict = file_format
__magic_name__ : Union[str, Any] = Spark(
df=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , working_dir=UpperCamelCase_ , **UpperCamelCase_ , )
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__magic_name__ : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCamelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 710
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Dict[Optional[str], Type[Formatter]] = {}
__lowerCamelCase : Dict[Optional[str], str] = {}
__lowerCamelCase : Dict[Optional[str], Exception] = {}
def lowercase__ ( __A: type ,__A: Optional[str] ,__A: Optional[List[str]] = None ,):
'''simple docstring'''
__magic_name__ : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__magic_name__ : Tuple = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__magic_name__ : Optional[int] = format_type
def lowercase__ ( __A: Exception ,__A: Optional[str] ,__A: Optional[List[str]] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__magic_name__ : List[str] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__lowerCamelCase : str = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__lowerCamelCase : int = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__lowerCamelCase : str = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def lowercase__ ( __A: Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowercase__ ( __A: Optional[str] ,**__A: Any ):
'''simple docstring'''
__magic_name__ : Tuple = get_format_type_from_alias(__A )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__A )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 501
| 0
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
a = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = EfficientNetConfig()
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""hidden_dim"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""width_coef"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""depth_coef"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""image_size"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""dropout_rate"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""dw_padding"""]
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = 1000
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""image_size"""]
__SCREAMING_SNAKE_CASE = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__UpperCAmelCase , )
return preprocessor
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
__SCREAMING_SNAKE_CASE = sorted(set(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = {b: str(__UpperCAmelCase ) for b, i in zip(__UpperCAmelCase , range(__UpperCAmelCase ) )}
__SCREAMING_SNAKE_CASE = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__SCREAMING_SNAKE_CASE = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
__SCREAMING_SNAKE_CASE = {}
for item in rename_keys:
if item[0] in original_param_names:
__SCREAMING_SNAKE_CASE = """efficientnet.""" + item[1]
__SCREAMING_SNAKE_CASE = """classifier.weight"""
__SCREAMING_SNAKE_CASE = """classifier.bias"""
return key_mapping
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
__SCREAMING_SNAKE_CASE = key_mapping[key]
if "_conv" in key and "kernel" in key:
__SCREAMING_SNAKE_CASE = torch.from_numpy(__UpperCAmelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__SCREAMING_SNAKE_CASE = torch.from_numpy(__UpperCAmelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__SCREAMING_SNAKE_CASE = torch.from_numpy(np.transpose(__UpperCAmelCase ) )
else:
__SCREAMING_SNAKE_CASE = torch.from_numpy(__UpperCAmelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_classes[model_name](
include_top=__UpperCAmelCase , weights="""imagenet""" , input_tensor=__UpperCAmelCase , input_shape=__UpperCAmelCase , pooling=__UpperCAmelCase , classes=1000 , classifier_activation="""softmax""" , )
__SCREAMING_SNAKE_CASE = original_model.trainable_variables
__SCREAMING_SNAKE_CASE = original_model.non_trainable_variables
__SCREAMING_SNAKE_CASE = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__SCREAMING_SNAKE_CASE = param.numpy()
__SCREAMING_SNAKE_CASE = list(tf_params.keys() )
# Load HuggingFace model
__SCREAMING_SNAKE_CASE = get_efficientnet_config(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = EfficientNetForImageClassification(__UpperCAmelCase ).eval()
__SCREAMING_SNAKE_CASE = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
__SCREAMING_SNAKE_CASE = rename_keys(__UpperCAmelCase )
replace_params(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Initialize preprocessor and preprocess input image
__SCREAMING_SNAKE_CASE = convert_image_processor(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = outputs.logits.detach().numpy()
# Original model inference
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""image_size"""]
__SCREAMING_SNAKE_CASE = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__SCREAMING_SNAKE_CASE = image.img_to_array(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = np.expand_dims(__UpperCAmelCase , axis=0 )
__SCREAMING_SNAKE_CASE = original_model.predict(__UpperCAmelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(__UpperCAmelCase ):
os.mkdir(__UpperCAmelCase )
# Save converted model and image processor
hf_model.save_pretrained(__UpperCAmelCase )
preprocessor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
__SCREAMING_SNAKE_CASE = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__UpperCAmelCase )
hf_model.push_to_hub(__UpperCAmelCase )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
a = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 109
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = tempfile.mkdtemp()
# fmt: off
a_ : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
a_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
a_ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
a_ : Tuple = {"""unk_token""": """<unk>"""}
a_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
a_ : int = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
a_ : int = os.path.join(self.tmpdirname , lowerCAmelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a_ : Tuple = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = self.get_tokenizer()
a_ : Any = self.get_rust_tokenizer()
a_ : Any = self.get_image_processor()
a_ : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
a_ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_ )
a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
a_ : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a_ : Any = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
a_ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.get_image_processor()
a_ : List[str] = self.get_tokenizer()
a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : List[str] = self.prepare_image_inputs()
a_ : Dict = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
a_ : List[Any] = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = self.get_image_processor()
a_ : List[Any] = self.get_tokenizer()
a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : Optional[Any] = """lower newer"""
a_ : List[str] = processor(text=lowerCAmelCase_ )
a_ : Union[str, Any] = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.get_image_processor()
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Optional[Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : Dict = """lower newer"""
a_ : Optional[int] = self.prepare_image_inputs()
a_ : str = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = self.get_image_processor()
a_ : str = self.get_tokenizer()
a_ : Union[str, Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a_ : int = processor.batch_decode(lowerCAmelCase_ )
a_ : int = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = self.get_image_processor()
a_ : List[str] = self.get_tokenizer()
a_ : int = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : Dict = """lower newer"""
a_ : Optional[Any] = self.prepare_image_inputs()
a_ : Any = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 577
| 0
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowerCAmelCase = 10
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
for i in range(lowercase_ , lowercase_ ):
if array[i] == target:
return i
return -1
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : str = len(lowercase_ )
while left <= right:
if right - left < precision:
return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = (left + right) // 3 + 1
__UpperCAmelCase : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__UpperCAmelCase : Any = one_third - 1
elif array[two_third] < target:
__UpperCAmelCase : Any = two_third + 1
else:
__UpperCAmelCase : Union[str, Any] = one_third + 1
__UpperCAmelCase : Any = two_third - 1
else:
return -1
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Union[str, Any] = (left + right) // 3 + 1
__UpperCAmelCase : Tuple = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowercase_ , one_third - 1 , lowercase_ , lowercase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowercase_ , lowercase_ , lowercase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase_ , lowercase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
lowerCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
lowerCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
lowerCAmelCase = ite_ternary_search(collection, target)
lowerCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'Iterative search: {target} found at positions: {resulta}')
print(F'Recursive search: {target} found at positions: {resulta}')
else:
print("""Not found""")
| 675
|
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675
| 1
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def snake_case_ (__A : List[str] , __A : List[Any]=False ) -> int:
try:
__lowerCAmelCase : List[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCAmelCase : List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCAmelCase : List[Any] = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
__UpperCAmelCase = parse_flag_from_env("""RUN_SLOW""", default=False)
__UpperCAmelCase = parse_flag_from_env("""RUN_REMOTE""", default=False)
__UpperCAmelCase = parse_flag_from_env("""RUN_LOCAL""", default=True)
__UpperCAmelCase = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
__UpperCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
__UpperCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
__UpperCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
__UpperCAmelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
__UpperCAmelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
__UpperCAmelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
__UpperCAmelCase = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def snake_case_ (__A : List[Any] ) -> Tuple:
try:
import faiss # noqa
except ImportError:
__lowerCAmelCase : Union[str, Any] = unittest.skip("""test requires faiss""" )(__A )
return test_case
def snake_case_ (__A : str ) -> Optional[int]:
try:
import regex # noqa
except ImportError:
__lowerCAmelCase : Tuple = unittest.skip("""test requires regex""" )(__A )
return test_case
def snake_case_ (__A : Any ) -> str:
try:
import elasticsearch # noqa
except ImportError:
__lowerCAmelCase : int = unittest.skip("""test requires elasticsearch""" )(__A )
return test_case
def snake_case_ (__A : str ) -> Optional[Any]:
try:
import sqlalchemy # noqa
except ImportError:
__lowerCAmelCase : Optional[int] = unittest.skip("""test requires sqlalchemy""" )(__A )
return test_case
def snake_case_ (__A : Any ) -> List[str]:
if not config.TORCH_AVAILABLE:
__lowerCAmelCase : Union[str, Any] = unittest.skip("""test requires PyTorch""" )(__A )
return test_case
def snake_case_ (__A : Any ) -> int:
if not config.TF_AVAILABLE:
__lowerCAmelCase : str = unittest.skip("""test requires TensorFlow""" )(__A )
return test_case
def snake_case_ (__A : str ) -> Tuple:
if not config.JAX_AVAILABLE:
__lowerCAmelCase : Optional[Any] = unittest.skip("""test requires JAX""" )(__A )
return test_case
def snake_case_ (__A : Any ) -> Any:
if not config.PIL_AVAILABLE:
__lowerCAmelCase : List[Any] = unittest.skip("""test requires Pillow""" )(__A )
return test_case
def snake_case_ (__A : Any ) -> Optional[Any]:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__A )
else:
return test_case
def snake_case_ (__A : List[Any] ) -> List[Any]:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__A )
else:
return test_case
def snake_case_ (__A : Dict ) -> Any:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
else:
return test_case
def snake_case_ (__A : str ) -> str:
def _require_spacy_model(__A : List[Any] ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def snake_case_ (__A : int ) -> Any:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__A )
else:
return test_case
def snake_case_ (__A : str ) -> Optional[int]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__A )
else:
return test_case
def snake_case_ (__A : Dict ) -> Tuple:
if not _run_slow_tests or _run_slow_tests == 0:
__lowerCAmelCase : Optional[int] = unittest.skip("""test is slow""" )(__A )
return test_case
def snake_case_ (__A : Dict ) -> List[str]:
if not _run_local_tests or _run_local_tests == 0:
__lowerCAmelCase : Any = unittest.skip("""test is local""" )(__A )
return test_case
def snake_case_ (__A : Any ) -> Dict:
if not _run_packaged_tests or _run_packaged_tests == 0:
__lowerCAmelCase : Optional[int] = unittest.skip("""test is packaged""" )(__A )
return test_case
def snake_case_ (__A : int ) -> Union[str, Any]:
if not _run_remote_tests or _run_remote_tests == 0:
__lowerCAmelCase : Union[str, Any] = unittest.skip("""test requires remote""" )(__A )
return test_case
def snake_case_ (*__A : Optional[int] ) -> Optional[int]:
def decorate(cls : Dict ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("""test""" ):
for decorator in decorators:
__lowerCAmelCase : Union[str, Any] = decorator(__A )
setattr(cls , __A , __A )
return cls
return decorate
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : int =0
lowerCamelCase : Optional[Any] =1
lowerCamelCase : str =2
@contextmanager
def snake_case_ (__A : Dict=OfflineSimulationMode.CONNECTION_FAILS , __A : Optional[int]=1e-16 ) -> Optional[int]:
__lowerCAmelCase : Tuple = requests.Session().request
def timeout_request(__A : str , __A : Any , __A : Optional[int] , **__A : Optional[Any] ):
# Change the url to an invalid url so that the connection hangs
__lowerCAmelCase : str = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__lowerCAmelCase : int = timeout
try:
return online_request(__A , __A , **__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__lowerCAmelCase : List[Any] = url
__lowerCAmelCase : List[str] = e.args[0]
__lowerCAmelCase : Optional[Any] = (max_retry_error.args[0].replace("""10.255.255.1""" , f'''OfflineMock[{url}]''' ),)
__lowerCAmelCase : str = (max_retry_error,)
raise
def raise_connection_error(__A : Dict , __A : List[Any] , **__A : Optional[int] ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , __A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , __A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def snake_case_ (*__A : Optional[int] , **__A : Dict ) -> int:
__lowerCAmelCase : Optional[Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A , **__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def snake_case_ () -> Any:
import gc
gc.collect()
__lowerCAmelCase : Optional[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def snake_case_ () -> List[Any]:
import gc
gc.collect()
__lowerCAmelCase : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def snake_case_ (__A : Union[str, Any] , __A : List[str] ) -> int:
return deepcopy(__A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(__A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def snake_case_ (__A : Optional[int] ) -> Tuple:
import decorator
from requests.exceptions import HTTPError
def _wrapper(__A : List[str] , *__A : Any , **__A : int ):
try:
return func(*__A , **__A )
except HTTPError as err:
if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper , __A )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = returncode
__lowerCAmelCase : Union[str, Any] = stdout
__lowerCAmelCase : Union[str, Any] = stderr
async def snake_case_ (__A : Tuple , __A : Tuple ) -> Optional[int]:
while True:
__lowerCAmelCase : List[Any] = await stream.readline()
if line:
callback(__A )
else:
break
async def snake_case_ (__A : List[str] , __A : Union[str, Any]=None , __A : List[str]=None , __A : int=None , __A : Union[str, Any]=False , __A : List[Any]=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ , """ """.join(__A ) )
__lowerCAmelCase : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Tuple = []
def tee(__A : Union[str, Any] , __A : Union[str, Any] , __A : Dict , __A : str="" ):
__lowerCAmelCase : Any = line.decode("""utf-8""" ).rstrip()
sink.append(__A )
if not quiet:
print(__A , __A , file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __A : tee(__A , __A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda __A : tee(__A , __A , sys.stderr , label="""stderr:""" ) ),
] , timeout=__A , )
return _RunOutput(await p.wait() , __A , __A )
def snake_case_ (__A : Tuple , __A : Dict=None , __A : Optional[Any]=None , __A : Dict=1_8_0 , __A : Any=False , __A : Dict=True ) -> _RunOutput:
__lowerCAmelCase : str = asyncio.get_event_loop()
__lowerCAmelCase : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__A , env=__A , stdin=__A , timeout=__A , quiet=__A , echo=__A ) )
__lowerCAmelCase : Tuple = """ """.join(__A )
if result.returncode > 0:
__lowerCAmelCase : Any = """\n""".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def snake_case_ () -> Any:
__lowerCAmelCase : Union[str, Any] = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
__lowerCAmelCase : List[Any] = re.sub(r"""^gw""" , """""" , __A , 0 , re.M )
return int(__A )
def snake_case_ () -> Any:
__lowerCAmelCase : List[Any] = 2_9_5_0_0
__lowerCAmelCase : Optional[Any] = pytest_xdist_worker_id()
return port + uniq_delta
| 651
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : Tuple ="camembert"
def __init__( self : Any , lowerCAmelCase : List[str]=3_05_22 , lowerCAmelCase : Any=7_68 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Union[str, Any]=30_72 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Union[str, Any]=5_12 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : Union[str, Any]=1e-12 , lowerCAmelCase : Union[str, Any]=1 , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Dict="absolute" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : Optional[Any] = hidden_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : Dict = num_attention_heads
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : int = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = type_vocab_size
__lowerCAmelCase : Optional[int] = initializer_range
__lowerCAmelCase : Dict = layer_norm_eps
__lowerCAmelCase : Any = position_embedding_type
__lowerCAmelCase : int = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowerCAmelCase : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 651
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __lowercase ( lowercase__ ):
'''simple docstring'''
__lowerCAmelCase = '''wav2vec2'''
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.0_5 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="sum" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
__a : Union[str, Any] = hidden_size
__a : Optional[int] = feat_extract_norm
__a : Optional[int] = feat_extract_activation
__a : Dict = list(UpperCAmelCase__ )
__a : str = list(UpperCAmelCase__ )
__a : str = list(UpperCAmelCase__ )
__a : List[Any] = conv_bias
__a : List[Any] = num_conv_pos_embeddings
__a : Dict = num_conv_pos_embedding_groups
__a : Optional[int] = len(self.conv_dim )
__a : Any = num_hidden_layers
__a : int = intermediate_size
__a : List[str] = hidden_act
__a : Dict = num_attention_heads
__a : Tuple = hidden_dropout
__a : Optional[int] = attention_dropout
__a : Tuple = activation_dropout
__a : str = feat_proj_dropout
__a : int = final_dropout
__a : Optional[Any] = layerdrop
__a : Union[str, Any] = layer_norm_eps
__a : List[str] = initializer_range
__a : str = vocab_size
__a : Optional[Any] = do_stable_layer_norm
__a : List[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : List[Any] = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Union[str, Any] = mask_time_length
__a : Optional[int] = mask_time_min_masks
__a : Union[str, Any] = mask_feature_prob
__a : Any = mask_feature_length
__a : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__a : Optional[int] = num_codevectors_per_group
__a : int = num_codevector_groups
__a : Optional[Any] = contrastive_logits_temperature
__a : str = feat_quantizer_dropout
__a : List[str] = num_negatives
__a : int = codevector_dim
__a : Any = proj_codevector_dim
__a : List[Any] = diversity_loss_weight
# ctc loss
__a : Tuple = ctc_loss_reduction
__a : Optional[Any] = ctc_zero_infinity
# adapter
__a : Dict = add_adapter
__a : str = adapter_kernel_size
__a : Optional[Any] = adapter_stride
__a : Union[str, Any] = num_adapter_layers
__a : Union[str, Any] = output_hidden_size or hidden_size
__a : Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__a : Union[str, Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__a : List[str] = list(UpperCAmelCase__ )
__a : List[str] = list(UpperCAmelCase__ )
__a : Dict = list(UpperCAmelCase__ )
__a : Optional[int] = xvector_output_dim
@property
def _lowerCamelCase ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 700
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A = logging.get_logger(__name__)
A = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''detr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=3 , _UpperCAmelCase=100 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase="resnet50" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , **_UpperCAmelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a : Any = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = backbone_config.get('''model_type''' )
__a : Tuple = CONFIG_MAPPING[backbone_model_type]
__a : Union[str, Any] = config_class.from_dict(_UpperCAmelCase )
# set timm attributes to None
__a , __a , __a : Union[str, Any] = None, None, None
__a : Union[str, Any] = use_timm_backbone
__a : Any = backbone_config
__a : Tuple = num_channels
__a : int = num_queries
__a : str = d_model
__a : Any = encoder_ffn_dim
__a : int = encoder_layers
__a : Optional[int] = encoder_attention_heads
__a : Any = decoder_ffn_dim
__a : str = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : List[Any] = dropout
__a : Union[str, Any] = attention_dropout
__a : int = activation_dropout
__a : Dict = activation_function
__a : str = init_std
__a : int = init_xavier_std
__a : Optional[Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : str = encoder_layers
__a : List[str] = auxiliary_loss
__a : Optional[Any] = position_embedding_type
__a : Any = backbone
__a : Tuple = use_pretrained_backbone
__a : int = dilation
# Hungarian matcher
__a : str = class_cost
__a : Optional[Any] = bbox_cost
__a : Any = giou_cost
# Loss coefficients
__a : List[str] = mask_loss_coefficient
__a : Dict = dice_loss_coefficient
__a : str = bbox_loss_coefficient
__a : str = giou_loss_coefficient
__a : Any = eos_coefficient
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def _lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self ):
return self.d_model
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(backbone_config=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Dict = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__a : Tuple = self.backbone_config.to_dict()
__a : Dict = self.__class__.model_type
return output
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-5
@property
def _lowerCamelCase ( self ):
return 12
| 101
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''pixel_values''']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : int = size if size is not None else {'shortest_edge': 224}
__A : Any = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase)
__A : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__A : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='crop_size')
__A : Optional[int] = do_resize
__A : str = size
__A : List[Any] = resample
__A : Dict = do_center_crop
__A : Any = crop_size
__A : Tuple = do_rescale
__A : List[str] = rescale_factor
__A : List[Any] = do_normalize
__A : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__A : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
__A : Optional[Any] = do_convert_rgb
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase)
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
__A : List[str] = get_resize_output_image_size(_UpperCAmelCase , size=size['shortest_edge'] , default_to_square=_UpperCAmelCase)
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[str] = get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = do_resize if do_resize is not None else self.do_resize
__A : int = size if size is not None else self.size
__A : Any = get_size_dict(_UpperCAmelCase , param_name='size' , default_to_square=_UpperCAmelCase)
__A : Dict = resample if resample is not None else self.resample
__A : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : str = crop_size if crop_size is not None else self.crop_size
__A : str = get_size_dict(_UpperCAmelCase , param_name='crop_size' , default_to_square=_UpperCAmelCase)
__A : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__A : Any = image_std if image_std is not None else self.image_std
__A : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__A : int = make_list_of_images(_UpperCAmelCase)
if not valid_images(_UpperCAmelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__A : Any = [convert_to_rgb(_UpperCAmelCase) for image in images]
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(_UpperCAmelCase) for image in images]
if do_resize:
__A : Any = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase) for image in images]
if do_center_crop:
__A : Tuple = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase) for image in images]
__A : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase) for image in images]
__A : int = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase)
| 8
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE__ = model(_snake_case )["last_hidden_state"]
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 159
| 0
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : List[str]=7 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Tuple=400 , SCREAMING_SNAKE_CASE : List[Any]=3 , ):
lowercase__ : Optional[Any] = parent
lowercase__ : Any = do_resize
lowercase__ : str = size if size is not None else {"shortest_edge": 288}
lowercase__ : Any = size_divisor
lowercase__ : str = do_rescale
lowercase__ : Any = rescale_factor
lowercase__ : Any = do_normalize
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Tuple = image_mean
lowercase__ : Any = image_std
lowercase__ : Tuple = do_pad
lowercase__ : Tuple = batch_size
lowercase__ : str = num_channels
lowercase__ : List[Any] = min_resolution
lowercase__ : str = max_resolution
def snake_case ( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
if not batched:
lowercase__ : int = self.size["shortest_edge"]
lowercase__ : Optional[Any] = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
lowercase__ , lowercase__ : Union[str, Any] = image.size
else:
lowercase__ , lowercase__ : int = image.shape[1], image.shape[2]
lowercase__ : Any = size / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if h < w:
lowercase__ , lowercase__ : Dict = size, scale * w
else:
lowercase__ , lowercase__ : Tuple = scale * h, size
lowercase__ : Tuple = int((1_333 / 800) * size )
if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > max_size:
lowercase__ : Tuple = max_size / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = newh * scale
lowercase__ : List[str] = neww * scale
lowercase__ , lowercase__ : str = int(newh + 0.5 ), int(neww + 0.5 )
lowercase__ , lowercase__ : str = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowercase__ : List[Any] = []
for image in image_inputs:
lowercase__ , lowercase__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ : List[str] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
lowercase__ : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = BridgeTowerImageProcessor if is_vision_available() else None
def snake_case ( self : Dict ):
lowercase__ : int = BridgeTowerImageProcessingTester(self )
@property
def snake_case ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Dict ):
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size_divisor" ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : List[Any] ):
# Initialize image processor
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ : str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : int ):
# Initialize image processor
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowercase__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : Optional[int] ):
# Initialize image processor
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 81
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81
| 1
|
"""simple docstring"""
import math
import sys
def lowercase__(A ) ->int:
"""simple docstring"""
if number != int(A ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
lowercase__ : str= [-1] * (number + 1)
lowercase__ : str= 0
for i in range(1 , number + 1 ):
lowercase__ : str= sys.maxsize
lowercase__ : List[str]= int(math.sqrt(A ) )
for j in range(1 , root + 1 ):
lowercase__ : Dict= 1 + answers[i - (j**2)]
lowercase__ : Optional[int]= min(A , A )
lowercase__ : Dict= answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218
|
"""simple docstring"""
def lowercase__(A ) ->str:
"""simple docstring"""
if isinstance(A , A ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(A , A ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
lowercase__ : Tuple= False
if num < 0:
lowercase__ : str= True
lowercase__ : Dict= -num
lowercase__ : list[int]= []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(A ) for e in binary )
return "0b" + "".join(str(A ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCAmelCase = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "gpt_neox"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def UpperCamelCase_ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F"""got {self.rope_scaling}""" )
_UpperCamelCase = self.rope_scaling.get('''type''' , _A )
_UpperCamelCase = self.rope_scaling.get('''factor''' , _A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 71
| 0
|
from typing import Any
def __A ( _A ):
"""simple docstring"""
if not input_list:
return []
__a = [input_list.count(_A ) for value in input_list]
__a = max(_A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __A ( ):
"""simple docstring"""
__a = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_A )
__a = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_A )
env_command_parser(subparsers=_A )
launch_command_parser(subparsers=_A )
tpu_command_parser(subparsers=_A )
test_command_parser(subparsers=_A )
# Let's go
__a = parser.parse_args()
if not hasattr(_A , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_A )
if __name__ == "__main__":
main()
| 197
| 1
|
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCAmelCase_ ( a : Dict ):
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowerCAmelCase_ ( a : List[str] ):
a__ = np.max(_outputs , axis=-1 , keepdims=a )
a__ = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=a )
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:int = 'sigmoid'
SCREAMING_SNAKE_CASE:List[str] = 'softmax'
SCREAMING_SNAKE_CASE:Optional[Any] = 'none'
@add_end_docstrings(
_A , R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Optional[int] = False
SCREAMING_SNAKE_CASE:List[str] = ClassificationFunction.NONE
def __init__( self , **_a ):
"""simple docstring"""
super().__init__(**_a )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowercase__ ( self , _a=None , _a=None , _a="" , **_a ):
"""simple docstring"""
a__ = tokenizer_kwargs
a__ = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
a__ = self.model.config.return_all_scores
if isinstance(_a , _a ) or top_k is None:
a__ = top_k
a__ = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , _a , )
if return_all_scores:
a__ = None
else:
a__ = 1
if isinstance(_a , _a ):
a__ = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
a__ = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *_a , **_a ):
"""simple docstring"""
a__ = super().__call__(*_a , **_a )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
a__ = 'top_k' not in kwargs
if isinstance(args[0] , _a ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def lowercase__ ( self , _a , **_a ):
"""simple docstring"""
a__ = self.framework
if isinstance(_a , _a ):
return self.tokenizer(**_a , return_tensors=_a , **_a )
elif isinstance(_a , _a ) and len(_a ) == 1 and isinstance(inputs[0] , _a ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_a , **_a )
elif isinstance(_a , _a ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(_a , return_tensors=_a , **_a )
def lowercase__ ( self , _a ):
"""simple docstring"""
return self.model(**_a )
def lowercase__ ( self , _a , _a=None , _a=1 , _a=True ):
"""simple docstring"""
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
a__ = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
a__ = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
a__ = self.model.config.function_to_apply
else:
a__ = ClassificationFunction.NONE
a__ = model_outputs['logits'][0]
a__ = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
a__ = sigmoid(_a )
elif function_to_apply == ClassificationFunction.SOFTMAX:
a__ = softmax(_a )
elif function_to_apply == ClassificationFunction.NONE:
a__ = outputs
else:
raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
a__ = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(_a )
]
if not _legacy:
dict_scores.sort(key=lambda _a : x["score"] , reverse=_a )
if top_k is not None:
a__ = dict_scores[:top_k]
return dict_scores
| 715
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : int = get_tests_dir('fixtures')
__A : Tuple = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__A : List[Any] = get_tests_dir('fixtures/dummy-config.json')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self ):
"""simple docstring"""
a__ = 0
def lowercase__ ( self ):
"""simple docstring"""
a__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
a__ = AutoFeatureExtractor.from_pretrained(_a ).to_dict()
config_dict.pop('feature_extractor_type' )
a__ = WavaVecaFeatureExtractor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
a__ = AutoFeatureExtractor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
a__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , 'bert-base is not a local folder and is not a valid model identifier' ):
a__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
a__ = AutoFeatureExtractor.from_pretrained(_a , revision='aaaaaa' )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
a__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def lowercase__ ( self ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_a )
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
a__ = AutoFeatureExtractor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def lowercase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , _a )
AutoFeatureExtractor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoFeatureExtractor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
a__ = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ):
"""simple docstring"""
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:List[Any] = True
try:
AutoConfig.register('custom' , _a )
AutoFeatureExtractor.register(_a , _a )
# If remote code is not set, the default is to use local
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_a , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 126
| 0
|
_lowerCamelCase : str = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_lowerCamelCase : Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _lowerCAmelCase ( __magic_name__ :dict[int, list[int]] , __magic_name__ :int , __magic_name__ :list[bool] ):
UpperCAmelCase_ = True
UpperCAmelCase_ = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
order.append(__magic_name__ )
return order
def _lowerCAmelCase ( __magic_name__ :dict[int, list[int]] , __magic_name__ :int , __magic_name__ :list[bool] ):
UpperCAmelCase_ = True
UpperCAmelCase_ = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__magic_name__ , __magic_name__ , __magic_name__ )
return component
def _lowerCAmelCase ( __magic_name__ :dict[int, list[int]] ):
UpperCAmelCase_ = len(__magic_name__ ) * [False]
UpperCAmelCase_ = {vert: [] for vert in range(len(__magic_name__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__magic_name__ )
UpperCAmelCase_ = []
for i, was_visited in enumerate(__magic_name__ ):
if not was_visited:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ = []
UpperCAmelCase_ = len(__magic_name__ ) * [False]
for i in range(len(__magic_name__ ) ):
UpperCAmelCase_ = order[len(__magic_name__ ) - i - 1]
if not visited[vert]:
UpperCAmelCase_ = find_components(__magic_name__ , __magic_name__ , __magic_name__ )
components_list.append(__magic_name__ )
return components_list
| 121
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = logging.get_logger(__name__)
class snake_case__ ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(__snake_case )
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : List[str] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[int] ) -> int:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Tuple , ) -> Dict:
UpperCAmelCase_ = {}
if truncation is not None:
UpperCAmelCase_ = truncation
UpperCAmelCase_ = generate_kwargs
UpperCAmelCase_ = {}
if return_tensors is not None and return_type is None:
UpperCAmelCase_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCAmelCase_ = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict:
return True
def UpperCamelCase ( self : Optional[int] , *lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , lowerCAmelCase_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
UpperCAmelCase_ = ([prefix + arg for arg in args[0]],)
UpperCAmelCase_ = True
elif isinstance(args[0] , lowerCAmelCase_ ):
UpperCAmelCase_ = (prefix + args[0],)
UpperCAmelCase_ = False
else:
raise ValueError(
F''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
UpperCAmelCase_ = self.tokenizer(*lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ = super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )
if (
isinstance(args[0] , lowerCAmelCase_ )
and all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for el in args[0] )
and all(len(lowerCAmelCase_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **lowerCAmelCase_ : Tuple ) -> str:
UpperCAmelCase_ = self._parse_and_tokenize(lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ )
return inputs
def UpperCamelCase ( self : int , lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) -> Any:
if self.framework == "pt":
UpperCAmelCase_, UpperCAmelCase_ = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
UpperCAmelCase_, UpperCAmelCase_ = tf.shape(model_inputs['''input_ids'''] ).numpy()
UpperCAmelCase_ = generate_kwargs.get('''min_length''' , self.model.config.min_length )
UpperCAmelCase_ = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(lowerCAmelCase_ , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
UpperCAmelCase_ = self.model.generate(**lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase_ = output_ids.reshape(lowerCAmelCase_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any=ReturnType.TEXT , lowerCAmelCase_ : List[str]=False ) -> Union[str, Any]:
UpperCAmelCase_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase_ = {
F'''{self.return_name}_text''': self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
}
records.append(lowerCAmelCase_ )
return records
@add_end_docstrings(__snake_case )
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[int] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Tuple ) -> List[str]:
return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> bool:
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(__snake_case )
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = '''translation'''
def UpperCamelCase ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def UpperCamelCase ( self : Tuple , *lowerCAmelCase_ : int , lowerCAmelCase_ : int=TruncationStrategy.DO_NOT_TRUNCATE , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Union[str, Any]=None ) -> Optional[int]:
if getattr(self.tokenizer , '''_build_translation_inputs''' , lowerCAmelCase_ ):
return self.tokenizer._build_translation_inputs(
*lowerCAmelCase_ , return_tensors=self.framework , truncation=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ )
else:
return super()._parse_and_tokenize(*lowerCAmelCase_ , truncation=lowerCAmelCase_ )
def UpperCamelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : int ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = super()._sanitize_parameters(**lowerCAmelCase_ )
if src_lang is not None:
UpperCAmelCase_ = src_lang
if tgt_lang is not None:
UpperCAmelCase_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCAmelCase_ = kwargs.get('''task''' , self.task )
UpperCAmelCase_ = task.split('''_''' )
if task and len(lowerCAmelCase_ ) == 4:
# translation, XX, to YY
UpperCAmelCase_ = items[1]
UpperCAmelCase_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> int:
return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 121
| 1
|
from __future__ import annotations
def A__ ( __lowerCamelCase, __lowerCamelCase ):
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(__lowerCamelCase ):
print(F'''{i}\t\t{d}''' )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for j in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = [float('''inf''' )] * vertex_count
SCREAMING_SNAKE_CASE_ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
SCREAMING_SNAKE_CASE_ = distance[u] + w
SCREAMING_SNAKE_CASE_ = check_negative_cycle(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input("Enter number of vertices: ").strip())
__UpperCAmelCase = int(input("Enter number of edges: ").strip())
__UpperCAmelCase = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
__UpperCAmelCase = {"src": src, "dst": dest, "weight": weight}
__UpperCAmelCase = int(input("\nEnter shortest path source:").strip())
__UpperCAmelCase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 704
|
import numpy as np
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(np.ceil((x_end - xa) / h ) )
SCREAMING_SNAKE_CASE_ = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE_ = ya
SCREAMING_SNAKE_CASE_ = xa
for k in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = f(__lowerCamelCase, y[k] )
SCREAMING_SNAKE_CASE_ = f(x + 0.5 * h, y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE_ = f(x + 0.5 * h, y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE_ = f(x + h, y[k] + h * ka )
SCREAMING_SNAKE_CASE_ = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 597
| 0
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowerCAmelCase__ :
A_ : str = field(
metadata={'help': 'The output directory where the model will be written.'} , )
A_ : str = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
A_ : str = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def lowerCamelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
A = HfArgumentParser((ModelArguments,) )
((A) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
A = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
A = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
A = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
A = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
A = True
A = True
A = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
A = decoder_config.decoder_start_token_id
A = decoder_config.pad_token_id
if decoder_start_token_id is None:
A = decoder_config.bos_token_id
if pad_token_id is None:
A = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
A = decoder_config.eos_token_id
A = decoder_start_token_id
A = pad_token_id
A = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
A = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
A = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 106
|
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = mean_squared_error(
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ )
return {"mse": mse}
| 60
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class SCREAMING_SNAKE_CASE_ ( __lowerCamelCase ):
"""simple docstring"""
_a = 'pegasus'
_a = ['past_key_values']
_a = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , A=5_02_65 , A=10_24 , A=12 , A=40_96 , A=16 , A=12 , A=40_96 , A=16 , A=0.0 , A=0.0 , A=True , A=True , A="gelu" , A=10_24 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=0 , A=False , A=0 , A=1 , A=1 , **A , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = use_cache
__magic_name__ = encoder_layers
__magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
| 714
|
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 678
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
a__ = {"""mobilebert-uncased""": 5_12}
a__ = {}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = VOCAB_FILES_NAMES
snake_case_ : Any = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
snake_case_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : List[Any] = MobileBertTokenizer
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=True , lowerCAmelCase : str="[UNK]" , lowerCAmelCase : Any="[SEP]" , lowerCAmelCase : Union[str, Any]="[PAD]" , lowerCAmelCase : List[str]="[CLS]" , lowerCAmelCase : Any="[MASK]" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase) != tokenize_chinese_chars
):
_snake_case : Any = getattr(lowerCAmelCase , normalizer_state.pop("""type"""))
_snake_case : str = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**lowerCAmelCase)
_snake_case : Optional[Any] = do_lower_case
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
_snake_case : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : str = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_snake_case : List[Any] = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
| 477
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase ( SCREAMING_SNAKE_CASE__ : Dict ) -> str:
return EnvironmentCommand()
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
return EnvironmentCommand(args.accelerate_config_file )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( lowerCAmelCase : ArgumentParser) -> Optional[int]:
"""simple docstring"""
_snake_case : List[Any] = parser.add_parser("""env""")
download_parser.set_defaults(func=lowerCAmelCase)
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase)
def __init__( self : str , lowerCAmelCase : Optional[Any] , *lowerCAmelCase : str) -> None:
"""simple docstring"""
_snake_case : List[Any] = accelerate_config_file
def UpperCamelCase_ ( self : List[Any]) -> Tuple:
"""simple docstring"""
_snake_case : Optional[Any] = """not installed"""
if is_safetensors_available():
import safetensors
_snake_case : Union[str, Any] = safetensors.__version__
elif importlib.util.find_spec("""safetensors""") is not None:
import safetensors
_snake_case : int = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_snake_case : Dict = """not installed"""
_snake_case : List[str] = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_snake_case : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase):
_snake_case : Optional[Any] = load_config_from_file(self._accelerate_config_file).to_dict()
_snake_case : int = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
if isinstance(lowerCAmelCase , lowerCAmelCase)
else F'''\t{accelerate_config}'''
)
_snake_case : Any = """not installed"""
_snake_case : int = """NA"""
if is_torch_available():
import torch
_snake_case : Dict = torch.__version__
_snake_case : List[Any] = torch.cuda.is_available()
_snake_case : Optional[int] = """not installed"""
_snake_case : Any = """NA"""
if is_tf_available():
import tensorflow as tf
_snake_case : Tuple = tf.__version__
try:
# deprecated in v2.1
_snake_case : int = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_snake_case : Optional[int] = bool(tf.config.list_physical_devices("""GPU"""))
_snake_case : Dict = """not installed"""
_snake_case : Any = """not installed"""
_snake_case : int = """not installed"""
_snake_case : List[str] = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_snake_case : str = flax.__version__
_snake_case : Tuple = jax.__version__
_snake_case : int = jaxlib.__version__
_snake_case : Optional[Any] = jax.lib.xla_bridge.get_backend().platform
_snake_case : List[str] = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""")
print(self.format_dict(lowerCAmelCase))
return info
@staticmethod
def UpperCamelCase_ ( lowerCAmelCase : str) -> str:
"""simple docstring"""
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
| 477
| 1
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : List[str] , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[str] = None , **lowerCamelCase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(self , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = repo_info
SCREAMING_SNAKE_CASE : Any = token
SCREAMING_SNAKE_CASE : Tuple = None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.dir_cache is None:
SCREAMING_SNAKE_CASE : int = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(lowerCamelCase_ ): {"""name""": str(lowerCamelCase_ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , **lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
if not isinstance(self.repo_info , lowerCamelCase_ ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
SCREAMING_SNAKE_CASE : Dict = hf_hub_url(self.repo_info.id , lowerCamelCase_ , revision=self.repo_info.sha )
return fsspec.open(
lowerCamelCase_ , mode=lowerCamelCase_ , headers=get_authentication_headers_for_url(lowerCamelCase_ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : int , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
self._get_dirs()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._strip_protocol(lowerCamelCase_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int=False , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
self._get_dirs()
SCREAMING_SNAKE_CASE : str = PurePosixPath(path.strip("""/""" ) )
SCREAMING_SNAKE_CASE : Optional[int] = {}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE : List[Any] = PurePosixPath(p.strip("""/""" ) )
SCREAMING_SNAKE_CASE : List[str] = p.parent
if root == path:
SCREAMING_SNAKE_CASE : Dict = f
SCREAMING_SNAKE_CASE : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 708
|
'''simple docstring'''
import math
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ): # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = n
SCREAMING_SNAKE_CASE : Optional[int] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = w
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
__UpperCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 79
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 33
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = False ) -> str:
'''simple docstring'''
_lowercase =scheduler
_lowercase =optimizers if isinstance(lowerCAmelCase , (list, tuple) ) else [optimizers]
_lowercase =split_batches
_lowercase =step_with_optimizer
_lowercase =GradientState()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_lowercase =AcceleratorState().num_processes
for _ in range(lowerCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
return self.scheduler.get_last_lr()
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.scheduler.state_dict()
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
self.scheduler.load_state_dict(lowerCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
return self.scheduler.get_lr()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return self.scheduler.print_lr(*lowerCAmelCase , **lowerCAmelCase )
| 291
| 0
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : Dict = ["image_processor"]
UpperCamelCase : Any = "SamImageProcessor"
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__(__magic_name__ )
_lowerCAmelCase = self.image_processor
_lowerCAmelCase = -1_0
_lowerCAmelCase = self.image_processor.size['longest_edge']
def __call__( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = None , **__magic_name__ , ):
"""simple docstring"""
_lowerCAmelCase = self.image_processor(
__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
# pop arguments that are not used in the foward but used nevertheless
_lowerCAmelCase = encoding_image_processor['original_sizes']
if hasattr(__magic_name__ , 'numpy' ): # Checks if Torch or TF tensor
_lowerCAmelCase = original_sizes.numpy()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._check_and_preprocess_points(
input_points=__magic_name__ , input_labels=__magic_name__ , input_boxes=__magic_name__ , )
_lowerCAmelCase = self._normalize_and_convert(
__magic_name__ , __magic_name__ , input_points=__magic_name__ , input_labels=__magic_name__ , input_boxes=__magic_name__ , return_tensors=__magic_name__ , )
return encoding_image_processor
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __magic_name__ , original_sizes[0] ) for point in input_points
]
else:
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __magic_name__ , __magic_name__ )
for point, original_size in zip(__magic_name__ , __magic_name__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_lowerCAmelCase , _lowerCAmelCase = self._pad_points_and_labels(__magic_name__ , __magic_name__ )
_lowerCAmelCase = np.array(__magic_name__ )
if input_labels is not None:
_lowerCAmelCase = np.array(__magic_name__ )
if input_boxes is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __magic_name__ , original_sizes[0] , is_bounding_box=__magic_name__ )
for box in input_boxes
]
else:
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __magic_name__ , __magic_name__ , is_bounding_box=__magic_name__ )
for box, original_size in zip(__magic_name__ , __magic_name__ )
]
_lowerCAmelCase = np.array(__magic_name__ )
if input_boxes is not None:
if return_tensors == "pt":
_lowerCAmelCase = torch.from_numpy(__magic_name__ )
# boxes batch size of 1 by default
_lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_lowerCAmelCase = tf.convert_to_tensor(__magic_name__ )
# boxes batch size of 1 by default
_lowerCAmelCase = tf.expand_dims(__magic_name__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_lowerCAmelCase = torch.from_numpy(__magic_name__ )
# point batch size of 1 by default
_lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_lowerCAmelCase = tf.convert_to_tensor(__magic_name__ )
# point batch size of 1 by default
_lowerCAmelCase = tf.expand_dims(__magic_name__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_lowerCAmelCase = torch.from_numpy(__magic_name__ )
# point batch size of 1 by default
_lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_lowerCAmelCase = tf.convert_to_tensor(__magic_name__ )
# point batch size of 1 by default
_lowerCAmelCase = tf.expand_dims(__magic_name__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = max([point.shape[0] for point in input_points] )
_lowerCAmelCase = []
for i, point in enumerate(__magic_name__ ):
if point.shape[0] != expected_nb_points:
_lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_lowerCAmelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__magic_name__ )
_lowerCAmelCase = processed_input_points
return input_points, input_labels
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = original_size
_lowerCAmelCase , _lowerCAmelCase = self.image_processor._get_preprocess_shape(__magic_name__ , longest_edge=__magic_name__ )
_lowerCAmelCase = deepcopy(__magic_name__ ).astype(__magic_name__ )
if is_bounding_box:
_lowerCAmelCase = coords.reshape(-1 , 2 , 2 )
_lowerCAmelCase = coords[..., 0] * (new_w / old_w)
_lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_lowerCAmelCase = coords.reshape(-1 , 4 )
return coords
def _lowerCamelCase ( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(__magic_name__ , 'numpy' ): # Checks for TF or Torch tensor
_lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_points[0] , __magic_name__ ):
raise ValueError('Input points must be a list of list of floating points.' )
_lowerCAmelCase = [np.array(__magic_name__ ) for input_point in input_points]
else:
_lowerCAmelCase = None
if input_labels is not None:
if hasattr(__magic_name__ , 'numpy' ):
_lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_labels[0] , __magic_name__ ):
raise ValueError('Input labels must be a list of list integers.' )
_lowerCAmelCase = [np.array(__magic_name__ ) for label in input_labels]
else:
_lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__magic_name__ , 'numpy' ):
_lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__magic_name__ , __magic_name__ )
or not isinstance(input_boxes[0] , __magic_name__ )
or not isinstance(input_boxes[0][0] , __magic_name__ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
_lowerCAmelCase = [np.array(__magic_name__ ).astype(np.floataa ) for box in input_boxes]
else:
_lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__magic_name__ ) )
def _lowerCamelCase ( self , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
return self.image_processor.post_process_masks(*__magic_name__ , **__magic_name__ )
| 309
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=1_0 , __magic_name__=3 , __magic_name__=2 , __magic_name__=2 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__="divided_space_time" , __magic_name__=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_frames
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = attention_type
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scope
_lowerCAmelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = (num_frames) * self.num_patches_per_frame + 1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_lowerCAmelCase = self.num_labels
return config
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
# verify the logits shape
_lowerCAmelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Optional[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase : int = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Dict = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Any = False
UpperCamelCase : Tuple = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TimesformerModelTester(self )
_lowerCAmelCase = ConfigTester(
self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
_lowerCAmelCase = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TimesformerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = self.model_tester.seq_length
_lowerCAmelCase = self.model_tester.num_frames
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_lowerCAmelCase = len(__magic_name__ )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
_lowerCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
_lowerCAmelCase = np.load(__lowerCamelCase )
return list(__lowerCamelCase )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
__magic_name__ )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_video()
_lowerCAmelCase = image_processor(video[:8] , return_tensors='pt' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__magic_name__ )
# verify the logits
_lowerCAmelCase = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
_lowerCAmelCase = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 309
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase_ = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=None , ) -> Union[str, Any]:
if attention_mask is None:
__lowerCAmelCase =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowerCAmelCase =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowerCAmelCase =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCAmelCase =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCAmelCase =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __a :
def __init__( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str=13 , snake_case_ : List[Any]=7 , snake_case_ : str=True , snake_case_ : str=False , snake_case_ : List[str]=99 , snake_case_ : Tuple=16 , snake_case_ : Dict=2 , snake_case_ : str=4 , snake_case_ : Tuple=4 , snake_case_ : str="gelu" , snake_case_ : Tuple=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : int=32 , snake_case_ : List[str]=2 , snake_case_ : List[str]=1 , snake_case_ : List[str]=0 , snake_case_ : Union[str, Any]=0.0_2 , )-> Optional[int]:
__lowerCAmelCase =parent
__lowerCAmelCase =batch_size
__lowerCAmelCase =seq_length
__lowerCAmelCase =is_training
__lowerCAmelCase =use_labels
__lowerCAmelCase =vocab_size
__lowerCAmelCase =hidden_size
__lowerCAmelCase =num_hidden_layers
__lowerCAmelCase =num_attention_heads
__lowerCAmelCase =intermediate_size
__lowerCAmelCase =hidden_act
__lowerCAmelCase =hidden_dropout_prob
__lowerCAmelCase =attention_probs_dropout_prob
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =eos_token_id
__lowerCAmelCase =pad_token_id
__lowerCAmelCase =bos_token_id
__lowerCAmelCase =initializer_range
def UpperCamelCase ( self : List[Any])-> int:
__lowerCAmelCase =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
__lowerCAmelCase =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
__lowerCAmelCase =shift_tokens_right(snake_case_ , 1 , 2)
__lowerCAmelCase =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=snake_case_ , )
__lowerCAmelCase =prepare_blenderbot_inputs_dict(snake_case_ , snake_case_ , snake_case_)
return config, inputs_dict
def UpperCamelCase ( self : str)-> List[str]:
__lowerCAmelCase , __lowerCAmelCase =self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Union[str, Any])-> str:
__lowerCAmelCase =20
__lowerCAmelCase =model_class_name(snake_case_)
__lowerCAmelCase =model.encode(inputs_dict["""input_ids"""])
__lowerCAmelCase , __lowerCAmelCase =(
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase =model.init_cache(decoder_input_ids.shape[0] , snake_case_ , snake_case_)
__lowerCAmelCase =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
__lowerCAmelCase =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase =model.decode(
decoder_input_ids[:, :-1] , snake_case_ , decoder_attention_mask=snake_case_ , past_key_values=snake_case_ , decoder_position_ids=snake_case_ , )
__lowerCAmelCase =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
__lowerCAmelCase =model.decode(
decoder_input_ids[:, -1:] , snake_case_ , decoder_attention_mask=snake_case_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case_ , )
__lowerCAmelCase =model.decode(snake_case_ , snake_case_)
__lowerCAmelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""")
def UpperCamelCase ( self : List[str] , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Optional[int])-> List[Any]:
__lowerCAmelCase =20
__lowerCAmelCase =model_class_name(snake_case_)
__lowerCAmelCase =model.encode(inputs_dict["""input_ids"""])
__lowerCAmelCase , __lowerCAmelCase =(
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
__lowerCAmelCase =model.init_cache(decoder_input_ids.shape[0] , snake_case_ , snake_case_)
__lowerCAmelCase =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase =model.decode(
decoder_input_ids[:, :-1] , snake_case_ , decoder_attention_mask=snake_case_ , past_key_values=snake_case_ , decoder_position_ids=snake_case_ , )
__lowerCAmelCase =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
__lowerCAmelCase =model.decode(
decoder_input_ids[:, -1:] , snake_case_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case_ , decoder_position_ids=snake_case_ , )
__lowerCAmelCase =model.decode(snake_case_ , snake_case_ , decoder_attention_mask=snake_case_)
__lowerCAmelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""")
@require_flax
class __a ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 99
def UpperCamelCase ( self : List[Any])-> List[Any]:
__lowerCAmelCase =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__lowerCAmelCase =input_ids.shape[0]
__lowerCAmelCase =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCamelCase ( self : List[str])-> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =self._get_config_and_data()
__lowerCAmelCase =FlaxBlenderbotSmallForConditionalGeneration(snake_case_)
__lowerCAmelCase =lm_model(input_ids=snake_case_)
__lowerCAmelCase =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , snake_case_)
def UpperCamelCase ( self : str)-> List[str]:
__lowerCAmelCase =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__lowerCAmelCase =FlaxBlenderbotSmallForConditionalGeneration(snake_case_)
__lowerCAmelCase =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
__lowerCAmelCase =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
__lowerCAmelCase =lm_model(input_ids=snake_case_ , decoder_input_ids=snake_case_)
__lowerCAmelCase =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , snake_case_)
def UpperCamelCase ( self : List[str])-> Tuple:
__lowerCAmelCase =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
__lowerCAmelCase =shift_tokens_right(snake_case_ , 1 , 2)
__lowerCAmelCase =np.equal(snake_case_ , 1).astype(np.floataa).sum()
__lowerCAmelCase =np.equal(snake_case_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(snake_case_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class __a ( SCREAMING_SNAKE_CASE , unittest.TestCase , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase ( self : str)-> Optional[Any]:
__lowerCAmelCase =FlaxBlenderbotSmallModelTester(self)
def UpperCamelCase ( self : Optional[Any])-> int:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case_ , snake_case_ , snake_case_)
def UpperCamelCase ( self : int)-> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case_ , snake_case_ , snake_case_)
def UpperCamelCase ( self : str)-> str:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__lowerCAmelCase =self._prepare_for_class(snake_case_ , snake_case_)
__lowerCAmelCase =model_class(snake_case_)
@jax.jit
def encode_jitted(snake_case_ : List[Any] , snake_case_ : Optional[Any]=None , **snake_case_ : Tuple):
return model.encode(input_ids=snake_case_ , attention_mask=snake_case_)
with self.subTest("""JIT Enabled"""):
__lowerCAmelCase =encode_jitted(**snake_case_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
__lowerCAmelCase =encode_jitted(**snake_case_).to_tuple()
self.assertEqual(len(snake_case_) , len(snake_case_))
for jitted_output, output in zip(snake_case_ , snake_case_):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCamelCase ( self : Any)-> List[Any]:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__lowerCAmelCase =model_class(snake_case_)
__lowerCAmelCase =model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
__lowerCAmelCase ={
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case_ : str , snake_case_ : int , snake_case_ : str):
return model.decode(
decoder_input_ids=snake_case_ , decoder_attention_mask=snake_case_ , encoder_outputs=snake_case_ , )
with self.subTest("""JIT Enabled"""):
__lowerCAmelCase =decode_jitted(**snake_case_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
__lowerCAmelCase =decode_jitted(**snake_case_).to_tuple()
self.assertEqual(len(snake_case_) , len(snake_case_))
for jitted_output, output in zip(snake_case_ , snake_case_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCamelCase ( self : Optional[int])-> Tuple:
for model_class_name in self.all_model_classes:
__lowerCAmelCase =model_class_name.from_pretrained("""facebook/blenderbot_small-90M""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowerCAmelCase =np.ones((1, 1)) * model.config.eos_token_id
__lowerCAmelCase =model(snake_case_)
self.assertIsNotNone(snake_case_)
| 354
|
from collections.abc import Sequence
from queue import Queue
class __a :
def __init__( self : str , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any]=None , snake_case_ : Dict=None)-> Optional[int]:
__lowerCAmelCase =start
__lowerCAmelCase =end
__lowerCAmelCase =val
__lowerCAmelCase =(start + end) // 2
__lowerCAmelCase =left
__lowerCAmelCase =right
def __repr__( self : List[Any])-> Dict:
return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class __a :
def __init__( self : List[str] , snake_case_ : Sequence , snake_case_ : int)-> str:
__lowerCAmelCase =collection
__lowerCAmelCase =function
if self.collection:
__lowerCAmelCase =self._build_tree(0 , len(snake_case_) - 1)
def UpperCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : Optional[int])-> Optional[int]:
self._update_tree(self.root , snake_case_ , snake_case_)
def UpperCamelCase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : Union[str, Any])-> Union[str, Any]:
return self._query_range(self.root , snake_case_ , snake_case_)
def UpperCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : List[str])-> List[Any]:
if start == end:
return SegmentTreeNode(snake_case_ , snake_case_ , self.collection[start])
__lowerCAmelCase =(start + end) // 2
__lowerCAmelCase =self._build_tree(snake_case_ , snake_case_)
__lowerCAmelCase =self._build_tree(mid + 1 , snake_case_)
return SegmentTreeNode(snake_case_ , snake_case_ , self.fn(left.val , right.val) , snake_case_ , snake_case_)
def UpperCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : int , snake_case_ : int)-> List[Any]:
if node.start == i and node.end == i:
__lowerCAmelCase =val
return
if i <= node.mid:
self._update_tree(node.left , snake_case_ , snake_case_)
else:
self._update_tree(node.right , snake_case_ , snake_case_)
__lowerCAmelCase =self.fn(node.left.val , node.right.val)
def UpperCamelCase ( self : Tuple , snake_case_ : str , snake_case_ : str , snake_case_ : List[Any])-> Optional[int]:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , snake_case_ , snake_case_)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , snake_case_ , node.mid) , self._query_range(node.right , node.mid + 1 , snake_case_) , )
else:
# range in right child tree
return self._query_range(node.right , snake_case_ , snake_case_)
def UpperCamelCase ( self : List[str])-> Tuple:
if self.root is not None:
__lowerCAmelCase =Queue()
queue.put(self.root)
while not queue.empty():
__lowerCAmelCase =queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
lowercase_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 354
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Dict = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 710
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_UpperCAmelCase : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowerCAmelCase = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
return new_state_dict
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase = ''''''
if is_panoptic:
__lowerCAmelCase = '''conditional_detr.'''
# first: transformer encoder
for i in range(6):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
__lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:2_5_6, :]
__lowerCAmelCase = in_proj_bias[:2_5_6]
__lowerCAmelCase = in_proj_weight[2_5_6:5_1_2, :]
__lowerCAmelCase = in_proj_bias[2_5_6:5_1_2]
__lowerCAmelCase = in_proj_weight[-2_5_6:, :]
__lowerCAmelCase = in_proj_bias[-2_5_6:]
def __magic_name__( ):
__lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowerCAmelCase = '''resnet101'''
if "dc5" in model_name:
__lowerCAmelCase = True
__lowerCAmelCase = '''panoptic''' in model_name
if is_panoptic:
__lowerCAmelCase = 2_5_0
else:
__lowerCAmelCase = 9_1
__lowerCAmelCase = '''huggingface/label-files'''
__lowerCAmelCase = '''coco-detection-id2label.json'''
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type='''dataset'''), '''r'''))
__lowerCAmelCase = {int(lowerCamelCase): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
# load image processor
__lowerCAmelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowerCAmelCase = ConditionalDetrImageProcessor(format=lowerCamelCase)
# prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCamelCase, return_tensors='''pt''')
__lowerCAmelCase = encoding['''pixel_values''']
logger.info(F"""Converting model {model_name}...""")
# load original model from torch hub
__lowerCAmelCase = torch.hub.load('''DeppMeng/ConditionalDETR''', lowerCamelCase, pretrained=lowerCamelCase).eval()
__lowerCAmelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowerCAmelCase = '''conditional_detr.''' + src
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = rename_backbone_keys(lowerCamelCase)
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase, is_panoptic=lowerCamelCase)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCAmelCase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''')
and not key.startswith('''class_labels_classifier''')
and not key.startswith('''bbox_predictor''')
):
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
continue
else:
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
else:
if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
# finally, create HuggingFace model and load state dict
__lowerCAmelCase = ConditionalDetrForSegmentation(lowerCamelCase) if is_panoptic else ConditionalDetrForObjectDetection(lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
model.push_to_hub(repo_id=lowerCamelCase, organization='''DepuMeng''', commit_message='''Add model''')
# verify our conversion
__lowerCAmelCase = conditional_detr(lowerCamelCase)
__lowerCAmelCase = model(lowerCamelCase)
assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1E-4)
assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1E-4)
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1E-4)
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
image_processor.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_UpperCAmelCase : Any = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 474
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ):
@register_to_config
def __init__( self , lowercase__ = 7_6_8 , ):
super().__init__()
__UpperCAmelCase : str = nn.Parameter(torch.zeros(1 , _UpperCAmelCase))
__UpperCAmelCase : int = nn.Parameter(torch.ones(1 , _UpperCAmelCase))
def A( self , lowercase__ = None , lowercase__ = None , ):
__UpperCAmelCase : int = nn.Parameter(self.mean.to(_UpperCAmelCase).to(_UpperCAmelCase))
__UpperCAmelCase : int = nn.Parameter(self.std.to(_UpperCAmelCase).to(_UpperCAmelCase))
return self
def A( self , lowercase__):
__UpperCAmelCase : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def A( self , lowercase__):
__UpperCAmelCase : Union[str, Any] = (embeds * self.std) + self.mean
return embeds
| 462
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCamelCase : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 663
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCAmelCase_ ( _A ):
a__ = None
a__ = None
a__ = None
a__ = None
class UpperCAmelCase_ ( _A ):
def __init__( self : Optional[int] , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict="cls" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Optional[int]=True , **UpperCamelCase__ : int , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = project_dim
__magic_name__ = pooler_fn
__magic_name__ = learn_encoder
__magic_name__ = use_attention_mask
class UpperCAmelCase_ ( _A ):
a__ = [R"""pooler""", R"""logit_scale"""]
a__ = [R"""position_ids""", R"""predictions.decoder.bias"""]
a__ = """roberta"""
a__ = RobertaSeriesConfig
def __init__( self : str , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
super().__init__(UpperCamelCase__ )
__magic_name__ = XLMRobertaModel(UpperCamelCase__ )
__magic_name__ = nn.Linear(config.hidden_size , config.project_dim )
__magic_name__ = getattr(UpperCamelCase__ , """has_pre_transformation""" , UpperCamelCase__ )
if self.has_pre_transformation:
__magic_name__ = nn.Linear(config.hidden_size , config.project_dim )
__magic_name__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase ( self : str , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ) -> str:
"""simple docstring"""
__magic_name__ = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__ = self.base_model(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCamelCase__ , )
if self.has_pre_transformation:
__magic_name__ = outputs["""hidden_states"""][-2]
__magic_name__ = self.pre_LN(UpperCamelCase__ )
__magic_name__ = self.transformation_pre(UpperCamelCase__ )
return TransformationModelOutput(
projection_state=UpperCamelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__magic_name__ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCamelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 706
|
import math
def a__ ( A_, A_ = 0, A_ = 0 ):
'''simple docstring'''
__magic_name__ = end or len(A_ )
for i in range(A_, A_ ):
__magic_name__ = i
__magic_name__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__magic_name__ = array[temp_index - 1]
temp_index -= 1
__magic_name__ = temp_index_value
return array
def a__ ( A_, A_, A_ ): # Max Heap
'''simple docstring'''
__magic_name__ = index
__magic_name__ = 2 * index + 1 # Left Node
__magic_name__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__magic_name__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__magic_name__ = right_index
if largest != index:
__magic_name__ , __magic_name__ = array[largest], array[index]
heapify(A_, A_, A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = len(A_ )
for i in range(n // 2, -1, -1 ):
heapify(A_, A_, A_ )
for i in range(n - 1, 0, -1 ):
__magic_name__ , __magic_name__ = array[0], array[i]
heapify(A_, 0, A_ )
return array
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = low
__magic_name__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__magic_name__ , __magic_name__ = array[j], array[i]
i += 1
def a__ ( A_ ):
'''simple docstring'''
if len(A_ ) == 0:
return array
__magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) )
__magic_name__ = 16
return intro_sort(A_, 0, len(A_ ), A_, A_ )
def a__ ( A_, A_, A_, A_, A_ ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(A_ )
max_depth -= 1
__magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 )
__magic_name__ = partition(A_, A_, A_, A_ )
intro_sort(A_, A_, A_, A_, A_ )
__magic_name__ = p
return insertion_sort(A_, A_, A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip()
__lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 76
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A ={
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 463
| 0
|
'''simple docstring'''
import math
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> float:
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 705
|
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase = 1 ,__UpperCamelCase = 10_00 ) -> int:
lowerCamelCase_ = 1
lowerCamelCase_ = 0
for divide_by_number in range(__UpperCamelCase ,digit + 1 ):
lowerCamelCase_ = []
lowerCamelCase_ = numerator
for _ in range(1 ,digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCamelCase ):
lowerCamelCase_ = len(__UpperCamelCase )
lowerCamelCase_ = divide_by_number
else:
has_been_divided.append(__UpperCamelCase )
lowerCamelCase_ = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 384
| 0
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def snake_case_ ( ):
'''simple docstring'''
raise RuntimeError("CUDA out of memory." )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_snake_case = nn.Linear(3 , 4 )
_snake_case = nn.BatchNormad(4 )
_snake_case = nn.Linear(4 , 5 )
def UpperCamelCase( self , lowerCamelCase ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase ) ) )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ):
_snake_case = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase , [128, 64, 32, 16, 8] )
def UpperCamelCase( self ):
_snake_case = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase , lowerCamelCase ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_snake_case , _snake_case = mock_training_loop_function("hello" )
self.assertListEqual(lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def UpperCamelCase( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase ):
pass
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def UpperCamelCase( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def UpperCamelCase( self ):
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def UpperCamelCase( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def UpperCamelCase( self ):
_snake_case = torch.cuda.memory_allocated()
_snake_case = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase )
_snake_case = release_memory(lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase )
| 672
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__magic_name__ : Optional[int] = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[int] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672
| 1
|
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) == 0 )
def __lowerCAmelCase ()-> None:
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 531
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> List[str]:
"""simple docstring"""
snake_case_ = set()
snake_case_ = []
def parse_line(SCREAMING_SNAKE_CASE ):
for line in fp:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case_ = line.decode('''UTF-8''' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(''' ''' ):
# process a single warning and move it to `selected_warnings`.
if len(SCREAMING_SNAKE_CASE ) > 0:
snake_case_ = '''\n'''.join(SCREAMING_SNAKE_CASE )
# Only keep the warnings specified in `targets`
if any(f''': {x}: ''' in warning for x in targets ):
selected_warnings.add(SCREAMING_SNAKE_CASE )
buffer.clear()
continue
else:
snake_case_ = line.strip()
buffer.append(SCREAMING_SNAKE_CASE )
if from_gh:
for filename in os.listdir(SCREAMING_SNAKE_CASE ):
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
# read the file
if filename != "warnings.txt":
continue
with open(SCREAMING_SNAKE_CASE ) as fp:
parse_line(SCREAMING_SNAKE_CASE )
else:
try:
with zipfile.ZipFile(SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
# read the file
if filename != "warnings.txt":
continue
with z.open(SCREAMING_SNAKE_CASE ) as fp:
parse_line(SCREAMING_SNAKE_CASE )
except Exception:
logger.warning(
f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = set()
snake_case_ = [os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for p in os.listdir(SCREAMING_SNAKE_CASE ) if (p.endswith('''.zip''' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
return selected_warnings
if __name__ == "__main__":
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> List[str]:
"""simple docstring"""
return values.split(''',''' )
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 531
| 1
|
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple=14 , _lowercase : List[Any]=7 , _lowercase : Optional[Any]=True , _lowercase : str=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[Any]=True , _lowercase : List[str]=True , _lowercase : Union[str, Any]=99 , _lowercase : Any=32 , _lowercase : Union[str, Any]=5 , _lowercase : List[Any]=4 , _lowercase : Optional[Any]=37 , _lowercase : Dict="gelu" , _lowercase : Tuple=0.1 , _lowercase : List[str]=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : str=16 , _lowercase : List[Any]=2 , _lowercase : Union[str, Any]=0.02 , _lowercase : int=3 , _lowercase : List[Any]=4 , _lowercase : Union[str, Any]=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_labels
__UpperCAmelCase = use_mc_token_ids
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
__UpperCAmelCase = self.vocab_size - 1
def a ( self : Optional[Any] ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
if self.use_mc_token_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
__UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a ( self : Tuple ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def a ( self : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Tuple , _lowercase : int , *_lowercase : Tuple ):
__UpperCAmelCase = CTRLModel(config=_lowercase )
model.to(_lowercase )
model.eval()
model(_lowercase , token_type_ids=_lowercase , head_mask=_lowercase )
model(_lowercase , token_type_ids=_lowercase )
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def a ( self : Any , _lowercase : Any , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Any , *_lowercase : str ):
__UpperCAmelCase = CTRLLMHeadModel(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def a ( self : Tuple , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : List[str] , *_lowercase : Optional[int] ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = CTRLForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ : int = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ : str = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : List[str] = True
a__ : Dict = False
a__ : int = False
def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def a ( self : List[Any] ):
__UpperCAmelCase = CTRLModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_lowercase , n_embd=37 )
def a ( self : List[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def a ( self : Tuple ):
self.config_tester.run_common_tests()
def a ( self : Dict ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowercase )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowercase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a ( self : Tuple ):
pass
@slow
def a ( self : List[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = CTRLModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def a ( self : Union[str, Any] ):
pass
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Optional[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def a ( self : Union[str, Any] ):
__UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_lowercase )
__UpperCAmelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=_lowercase ) # Legal the president is
__UpperCAmelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__UpperCAmelCase = model.generate(_lowercase , do_sample=_lowercase )
self.assertListEqual(output_ids[0].tolist() , _lowercase )
| 49
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: Path , _lowerCamelCase: str = None , _lowerCamelCase: str = None , _lowerCamelCase: str = None , ):
if config_name_or_path is None:
__SCREAMING_SNAKE_CASE : List[str] = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__SCREAMING_SNAKE_CASE : Tuple = question_encoder_name_or_path
__SCREAMING_SNAKE_CASE : int = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
__SCREAMING_SNAKE_CASE : List[Any] = RagConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = gen_config
__SCREAMING_SNAKE_CASE : Union[str, Any] = question_encoder_config
__SCREAMING_SNAKE_CASE : Dict = model_class.from_pretrained_question_encoder_generator(
_lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
rag_model.save_pretrained(_lowerCamelCase )
# Sanity check.
model_class.from_pretrained(_lowerCamelCase )
# Save tokenizers.
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(_lowerCamelCase )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
UpperCamelCase__ : Dict = parser.parse_args()
UpperCamelCase__ : Any = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 578
| 0
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 7
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : Optional[int] = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 255
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = False ) -> str:
'''simple docstring'''
_lowercase =scheduler
_lowercase =optimizers if isinstance(lowerCAmelCase , (list, tuple) ) else [optimizers]
_lowercase =split_batches
_lowercase =step_with_optimizer
_lowercase =GradientState()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_lowercase =AcceleratorState().num_processes
for _ in range(lowerCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
return self.scheduler.get_last_lr()
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.scheduler.state_dict()
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
self.scheduler.load_state_dict(lowerCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
return self.scheduler.get_lr()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return self.scheduler.print_lr(*lowerCAmelCase , **lowerCAmelCase )
| 291
| 0
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case : Any = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
snake_case : str = {
'Salesforce/codegen-350M-mono': 2_0_4_8,
}
class lowerCAmelCase__ ( __A ):
__A : Optional[int] = VOCAB_FILES_NAMES
__A : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Any = ['input_ids', 'attention_mask']
__A : str = CodeGenTokenizer
def __init__( self : Any , _A : Union[str, Any]=None , _A : Any=None , _A : Union[str, Any]=None , _A : str="<|endoftext|>" , _A : Dict="<|endoftext|>" , _A : Any="<|endoftext|>" , _A : Optional[int]=False , **_A : Union[str, Any] , ):
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , add_prefix_space=_A , **_A , )
if kwargs.pop("add_bos_token" , _A):
A__ : List[str] = kwargs.pop("name_or_path" , "")
raise ValueError(
"Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n"
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly.")
A__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , _A) != add_prefix_space:
A__ : List[Any] = getattr(_A , pre_tok_state.pop("type"))
A__ : Tuple = add_prefix_space
A__ : Union[str, Any] = pre_tok_class(**_A)
A__ : str = add_prefix_space
def _lowercase ( self : Tuple , *_A : Optional[int] , **_A : str):
A__ : List[str] = kwargs.get("is_split_into_words" , _A)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_A , **_A)
def _lowercase ( self : Optional[int] , *_A : Any , **_A : str):
A__ : str = kwargs.get("is_split_into_words" , _A)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_A , **_A)
def _lowercase ( self : int , _A : str , _A : Optional[str] = None):
A__ : Union[str, Any] = self._tokenizer.model.save(_A , name=_A)
return tuple(_A)
def _lowercase ( self : Optional[int] , _A : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _A : bool = False , _A : bool = None , _A : Optional[List[str]] = None , **_A : Dict , ):
A__ : str = super().decode(
token_ids=_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , **_A , )
if truncate_before_pattern is not None and len(_A) > 0:
A__ : List[str] = self.truncate(_A , _A)
return decoded_text
def _lowercase ( self : int , _A : Tuple , _A : List[Any]):
def find_re(_A : Dict , _A : Optional[int] , _A : int):
A__ : Union[str, Any] = pattern.search(_A , _A)
return m.start() if m else -1
A__ : str = [re.compile(_A , re.MULTILINE) for pattern in truncate_before_pattern]
A__ : Any = list(re.finditer("^print" , _A , re.MULTILINE))
if len(_A) > 1:
A__ : List[str] = completion[: prints[1].start()]
A__ : Optional[int] = list(re.finditer("^def" , _A , re.MULTILINE))
if len(_A) > 1:
A__ : Optional[Any] = completion[: defs[1].start()]
A__ : Any = 0
A__ : int = [
pos for pos in [find_re(_A , _A , _A) for terminal in terminals] if pos != -1
]
if len(_A) > 0:
return completion[: min(_A)]
else:
return completion
| 708
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
snake_case : Optional[int] = get_logger()
snake_case : Optional[dict] = None
class lowerCAmelCase__ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : Optional[Any] , _A : Optional[int]=None , _A : str=None , **_A : Tuple):
super().__init__(features=_A)
import jax
from jaxlib.xla_client import Device
if isinstance(_A , _A):
raise ValueError(
F'Expected {device} to be a `str` not {type(_A)}, as `jaxlib.xla_extension.Device` '
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`.")
A__ : Union[str, Any] = device if isinstance(_A , _A) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
F'device: {str(jax.devices()[0])}.')
A__ : str = str(jax.devices()[0])
A__ : List[Any] = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(_A): device for device in jax.devices()}
def _lowercase ( self : Tuple , _A : int):
import jax
import jax.numpy as jnp
if isinstance(_A , _A) and column:
if all(
isinstance(_A , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(_A , axis=0)
return column
def _lowercase ( self : List[str] , _A : Union[str, Any]):
import jax
import jax.numpy as jnp
if isinstance(_A , (str, bytes, type(_A))):
return value
elif isinstance(_A , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
A__ : Dict = {}
if isinstance(_A , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A__ : Union[str, Any] = {"dtype": jnp.intaa}
else:
A__ : int = {"dtype": jnp.intaa}
elif isinstance(_A , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
A__ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A , PIL.Image.Image):
A__ : Any = np.asarray(_A)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A__ : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A , **{**default_dtype, **self.jnp_array_kwargs})
def _lowercase ( self : Dict , _A : Any):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(_A , "__array__") and not isinstance(_A , jax.Array):
A__ : Any = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A , np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_A) for substruct in data_struct])
elif isinstance(_A , (list, tuple)):
return self._consolidate([self.recursive_tensorize(_A) for substruct in data_struct])
return self._tensorize(_A)
def _lowercase ( self : Optional[Any] , _A : dict):
return map_nested(self._recursive_tensorize , _A , map_list=_A)
def _lowercase ( self : List[str] , _A : pa.Table):
A__ : Dict = self.numpy_arrow_extractor().extract_row(_A)
A__ : int = self.python_features_decoder.decode_row(_A)
return self.recursive_tensorize(_A)
def _lowercase ( self : int , _A : pa.Table):
A__ : int = self.numpy_arrow_extractor().extract_column(_A)
A__ : Optional[int] = self.python_features_decoder.decode_column(_A , pa_table.column_names[0])
A__ : Dict = self.recursive_tensorize(_A)
A__ : List[Any] = self._consolidate(_A)
return column
def _lowercase ( self : Tuple , _A : pa.Table):
A__ : Any = self.numpy_arrow_extractor().extract_batch(_A)
A__ : str = self.python_features_decoder.decode_batch(_A)
A__ : Optional[Any] = self.recursive_tensorize(_A)
for column_name in batch:
A__ : Optional[int] = self._consolidate(batch[column_name])
return batch
| 182
| 0
|
from __future__ import annotations
a__ = list[list[int]]
# assigning initial values to the grid
a__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __UpperCAmelCase ( __a : Matrix ,__a : int ,__a : int ,__a : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __UpperCAmelCase ( __a : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __UpperCAmelCase ( __a : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__a ):
_a , _a : Optional[Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(__a ,__a ,__a ,__a ):
_a : List[str] = digit
if sudoku(__a ) is not None:
return grid
_a : Optional[int] = 0
return None
def __UpperCAmelCase ( __a : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__a ,end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
a__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 14
|
def __lowerCamelCase ( _lowercase ) -> list:
UpperCamelCase = len(_lowercase )
for _ in range(_lowercase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
UpperCamelCase , UpperCamelCase = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_snake_case = list(range(10, 0, -1))
print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 282
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __UpperCamelCase :
__A = XGLMConfig
__A = {}
__A = '''gelu'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=14 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=0.02 , ) -> Optional[Any]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_labels
lowercase = vocab_size
lowercase = d_model
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = ffn_dim
lowercase = activation_function
lowercase = activation_dropout
lowercase = attention_dropout
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = None
lowercase = 0
lowercase = 2
lowercase = 1
def _a ( self ) -> Dict:
'''simple docstring'''
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = self.get_config()
lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowerCAmelCase , )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__A = (TFXGLMForCausalLM,) if is_tf_available() else ()
__A = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
__A = False
__A = False
__A = False
def _a ( self ) -> int:
'''simple docstring'''
lowercase = TFXGLMModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = TFXGLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _a ( self ) -> int:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __UpperCamelCase (unittest.TestCase ):
@slow
def _a ( self , _lowerCAmelCase=True ) -> Dict:
'''simple docstring'''
lowercase = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowercase = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowercase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
lowercase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCAmelCase )
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowercase = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
lowercase = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
lowercase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
lowercase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase , seed=[7, 0] )
lowercase = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCAmelCase )
lowercase = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def _a ( self ) -> int:
'''simple docstring'''
lowercase = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowercase = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowercase = """left"""
# use different length sentences to test batching
lowercase = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
lowercase = tokenizer(_lowerCAmelCase , return_tensors="""tf""" , padding=_lowerCAmelCase )
lowercase = inputs["""input_ids"""]
lowercase = model.generate(input_ids=_lowerCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
lowercase = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowercase = model.generate(input_ids=_lowerCAmelCase , max_new_tokens=12 )
lowercase = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowercase = model.generate(input_ids=_lowerCAmelCase , max_new_tokens=12 )
lowercase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCAmelCase )
lowercase = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , [non_padded_sentence, padded_sentence] )
| 653
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowercase_ , headers=lowercase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=2 , lowerCAmelCase_=99 , lowerCAmelCase_=0 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=12 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_="last" , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
'''simple docstring'''
a_ : Dict = parent
a_ : List[str] = batch_size
a_ : Any = seq_length
a_ : Tuple = is_training
a_ : Optional[Any] = use_input_lengths
a_ : int = use_token_type_ids
a_ : List[Any] = use_labels
a_ : Tuple = gelu_activation
a_ : Union[str, Any] = sinusoidal_embeddings
a_ : Tuple = causal
a_ : List[str] = asm
a_ : Any = n_langs
a_ : str = vocab_size
a_ : Any = n_special
a_ : str = hidden_size
a_ : int = num_hidden_layers
a_ : str = num_attention_heads
a_ : Dict = hidden_dropout_prob
a_ : str = attention_probs_dropout_prob
a_ : Any = max_position_embeddings
a_ : Dict = type_vocab_size
a_ : Tuple = type_sequence_label_size
a_ : Optional[Any] = initializer_range
a_ : str = num_labels
a_ : Tuple = num_choices
a_ : List[Any] = summary_type
a_ : List[Any] = use_proj
a_ : Optional[int] = scope
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
a_ : Dict = None
if self.use_input_lengths:
a_ : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a_ : Union[str, Any] = None
if self.use_token_type_ids:
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
a_ : Tuple = None
a_ : Any = None
a_ : Dict = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : str = ids_tensor([self.batch_size] , 2 ).float()
a_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
a_ : Union[str, Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : Tuple = FlaubertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : Tuple = model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
a_ : Union[str, Any] = model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
a_ : Optional[int] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : int = FlaubertWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : List[Any] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : Optional[int] = FlaubertForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : str = model(lowerCAmelCase_ )
a_ : List[Any] = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : Optional[Any] = FlaubertForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : Dict = model(lowerCAmelCase_ )
a_ : Optional[Any] = model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
a_ : Dict = model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((a_) , ) : List[str] = result_with_labels.to_tuple()
a_ : List[Any] = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((a_) , ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : List[str] = FlaubertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : Union[str, Any] = model(lowerCAmelCase_ )
a_ : Union[str, Any] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : List[str] = self.num_labels
a_ : Dict = FlaubertForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : Optional[Any] = self.num_choices
a_ : Tuple = FlaubertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Any = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) : List[Any] = config_and_inputs
a_ : Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
"""simple docstring"""
a_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ):
'''simple docstring'''
a_ : List[Any] = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
a_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = FlaubertModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def _lowerCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase_ )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : List[Any] = FlaubertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ : List[Any] = True
a_ : Optional[Any] = model_class(config=lowerCAmelCase_ )
a_ : Optional[int] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Tuple = torch.jit.trace(
lowerCAmelCase_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """traced_model.pt""" ) )
a_ : Optional[int] = torch.jit.load(os.path.join(lowerCAmelCase_ , """traced_model.pt""" ) , map_location=lowerCAmelCase_ )
loaded(inputs_dict["""input_ids"""].to(lowerCAmelCase_ ) , inputs_dict["""attention_mask"""].to(lowerCAmelCase_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
a_ : Optional[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
a_ : Dict = model(lowerCAmelCase_ )[0]
a_ : Optional[Any] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
a_ : Dict = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 577
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _snake_case ( A_ : Optional[Any] ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
super().__init__()
a_ : Optional[Any] = module
a_ : Union[str, Any] = nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
a_ : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _lowerCAmelCase ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = "bigscience/bloom-1b7"
# Constant values
a_ = 2.109659552692574
a_ = "Hello my name is"
a_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
a_ = 10
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = AutoTokenizer.from_pretrained(self.model_name )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
a_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
a_ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
a_ : Any = config.to_dict()
a_ : Dict = config.to_diff_dict()
a_ : Dict = config.to_json_string()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
a_ : List[str] = self.model_fpaa.get_memory_footprint()
a_ : Optional[int] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a_ : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _lowerCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : str = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = BitsAndBytesConfig()
a_ : Union[str, Any] = True
a_ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : Tuple = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def _lowerCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
a_ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _lowerCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a_ : List[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : Union[str, Any] = self.model_fpaa.to(torch.floataa )
a_ : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a_ : int = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
a_ : Optional[Any] = self.model_fpaa.half()
# Check this does not throw an error
a_ : Dict = self.model_fpaa.float()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _lowerCAmelCase ( cls ):
'''simple docstring'''
a_ : List[str] = """t5-small"""
a_ : Any = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
a_ : Optional[Any] = AutoTokenizer.from_pretrained(cls.model_name )
a_ : int = """Translate in German: Hello, my dog is cute"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
a_ : Dict = TaForConditionalGeneration._keep_in_fpaa_modules
a_ : str = None
# test with `t5-small`
a_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : str = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
a_ : Optional[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Any = model.generate(**lowerCAmelCase_ )
a_ : List[str] = modules
def _lowerCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a_ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a_ : Tuple = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Union[str, Any] = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
a_ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Tuple = model.generate(**lowerCAmelCase_ )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
a_ : Dict = """bigscience/bloom-560m"""
a_ : Any = """t5-small"""
# Different types of model
a_ : Tuple = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
a_ : Any = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
a_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
a_ : str = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a_ : List[Any] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
a_ : Dict = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = """facebook/opt-350m"""
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
a_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a_ : int = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a_ : Any = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
a_ : List[str] = LoRALayer(module.q_proj , rank=16 )
a_ : Union[str, Any] = LoRALayer(module.k_proj , rank=16 )
a_ : Optional[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a_ : Tuple = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a_ : List[str] = model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = "gpt2-xl"
a_ = 3.3191854854152187
| 577
| 1
|
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCAmelCase ( A : Optional[int] , A : Dict , A : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = OmegaConf.load(A )
_UpperCAmelCase = torch.load(A , map_location='cpu' )['model']
_UpperCAmelCase = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase = {}
_UpperCAmelCase = 'first_stage_model.'
for key in keys:
if key.startswith(A ):
_UpperCAmelCase = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase = {}
_UpperCAmelCase = 'model.diffusion_model.'
for key in keys:
if key.startswith(A ):
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase = config.model.params.first_stage_config.params
_UpperCAmelCase = config.model.params.unet_config.params
_UpperCAmelCase = VQModel(**A ).eval()
vqvae.load_state_dict(A )
_UpperCAmelCase = UNetLDMModel(**A ).eval()
unet.load_state_dict(A )
_UpperCAmelCase = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , )
_UpperCAmelCase = LDMPipeline(A , A , A )
pipeline.save_pretrained(A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowercase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 710
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 24
| 0
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowercase ( a__ : int ) -> str:
"""simple docstring"""
random.seed(a__ )
np.random.seed(a__ )
torch.manual_seed(a__ )
torch.cuda.manual_seed_all(a__ )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase_ :
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = 0.99_99 , lowerCamelCase_ = 0.0 , lowerCamelCase_ = 0 , lowerCamelCase_ = False , lowerCamelCase_ = 1.0 , lowerCamelCase_ = 2 / 3 , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase_ , torch.nn.Module ):
_UpperCamelCase = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ , )
_UpperCamelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCamelCase = True
if kwargs.get("max_value" , lowerCamelCase_ ) is not None:
_UpperCamelCase = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
_UpperCamelCase = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase_ ) is not None:
_UpperCamelCase = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
_UpperCamelCase = kwargs["min_value"]
_UpperCamelCase = list(lowerCamelCase_ )
_UpperCamelCase = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase_ ) is not None:
_UpperCamelCase = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
self.to(device=kwargs["device"] )
_UpperCamelCase = None
_UpperCamelCase = decay
_UpperCamelCase = min_decay
_UpperCamelCase = update_after_step
_UpperCamelCase = use_ema_warmup
_UpperCamelCase = inv_gamma
_UpperCamelCase = power
_UpperCamelCase = 0
_UpperCamelCase = None # set in `step()`
_UpperCamelCase = model_cls
_UpperCamelCase = model_config
@classmethod
def lowercase ( cls , lowerCamelCase_ , lowerCamelCase_ ) -> "EMAModel":
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = model_cls.load_config(lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ )
_UpperCamelCase = model_cls.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = cls(model.parameters() , model_cls=lowerCamelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase_ )
return ema_model
def lowercase ( self , lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCamelCase = self.model_cls.from_config(self.model_config )
_UpperCamelCase = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase_ )
model.register_to_config(**lowerCamelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ ) -> float:
"""simple docstring"""
_UpperCamelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCamelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCamelCase = (1 + step) / (10 + step)
_UpperCamelCase = min(lowerCamelCase_ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCamelCase = max(lowerCamelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase ( self , lowerCamelCase_ ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCamelCase_ , torch.nn.Module ):
_UpperCamelCase = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ , )
_UpperCamelCase = parameters.parameters()
_UpperCamelCase = list(lowerCamelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCamelCase = self.get_decay(self.optimization_step )
_UpperCamelCase = decay
_UpperCamelCase = 1 - decay
_UpperCamelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCamelCase = deepspeed.zero.GatheredParameters(lowerCamelCase_ , modifier_rank=lowerCamelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
_UpperCamelCase = list(lowerCamelCase_ )
for s_param, param in zip(self.shadow_params , lowerCamelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase ( self , lowerCamelCase_=None , lowerCamelCase_=None ) -> None:
"""simple docstring"""
_UpperCamelCase = [
p.to(device=lowerCamelCase_ , dtype=lowerCamelCase_ ) if p.is_floating_point() else p.to(device=lowerCamelCase_ )
for p in self.shadow_params
]
def lowercase ( self ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase ( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
_UpperCamelCase = [param.detach().cpu().clone() for param in parameters]
def lowercase ( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCamelCase = None
def lowercase ( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
_UpperCamelCase = copy.deepcopy(lowerCamelCase_ )
_UpperCamelCase = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCamelCase = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase_ ):
raise ValueError("Invalid min_decay" )
_UpperCamelCase = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase_ ):
raise ValueError("Invalid optimization_step" )
_UpperCamelCase = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase_ ):
raise ValueError("Invalid update_after_step" )
_UpperCamelCase = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase_ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCamelCase = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCamelCase = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCamelCase = state_dict.get("shadow_params" , lowerCamelCase_ )
if shadow_params is not None:
_UpperCamelCase = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase_ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 147
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowercase ( a__ : Dict ) -> Any:
"""simple docstring"""
if not is_accelerate_available():
return method
_UpperCamelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(a__ ) < version.parse("0.17.0" ):
return method
def wrapper(self : List[str] , *a__ : str , **a__ : int ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *a__ , **a__ )
return wrapper
| 147
| 1
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=3.6 )->List[Any]:
'''simple docstring'''
A_ : int = tokenizer
A_ : Optional[Any] = tokenizer.bos_token_id
A_ : Tuple = dataset
A_ : Tuple = seq_length
A_ : Optional[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self )->Optional[int]:
'''simple docstring'''
A_ : Any = iter(self.dataset )
A_ : Any = True
while more_examples:
A_ , A_ : Union[str, Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_SCREAMING_SNAKE_CASE )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A_ : Union[str, Any] = False
break
A_ : List[str] = tokenizer(_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
A_ : Tuple = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , self.seq_length ):
A_ : Tuple = all_token_ids[i : i + self.seq_length]
if len(_SCREAMING_SNAKE_CASE ) == self.seq_length:
yield torch.tensor(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = {'''streaming''': True}
A_ : int = load_dataset(args.dataset_name , split='''train''' , **SCREAMING_SNAKE_CASE )
A_ : str = ConstantLengthDataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , seq_length=args.seq_length )
A_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE , batch_size=args.batch_size )
return eval_dataloader
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
model.eval()
A_ : Tuple = []
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
with torch.no_grad():
A_ : Tuple = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
A_ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(SCREAMING_SNAKE_CASE ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A_ : Optional[Any] = torch.mean(torch.cat(SCREAMING_SNAKE_CASE ) )
try:
A_ : Any = torch.exp(SCREAMING_SNAKE_CASE )
except OverflowError:
A_ : Optional[Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCamelCase = Accelerator()
# Parse configuration
UpperCamelCase = HfArgumentParser(EvaluationArguments)
UpperCamelCase = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCamelCase = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCamelCase = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCamelCase , UpperCamelCase = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
UpperCamelCase , UpperCamelCase = evaluate(args)
logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 152
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 152
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[str] = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "deformable_detr"
snake_case_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Dict ,A : str=True ,A : Dict=None ,A : Tuple=3 ,A : Optional[Any]=3_00 ,A : Optional[Any]=10_24 ,A : int=6 ,A : Tuple=10_24 ,A : List[Any]=8 ,A : Any=6 ,A : int=10_24 ,A : int=8 ,A : Tuple=0.0 ,A : int=True ,A : Any="relu" ,A : Dict=2_56 ,A : List[str]=0.1 ,A : Optional[int]=0.0 ,A : List[str]=0.0 ,A : List[Any]=0.02 ,A : Optional[Any]=1.0 ,A : Optional[Any]=True ,A : Optional[Any]=False ,A : Optional[Any]="sine" ,A : int="resnet50" ,A : Dict=True ,A : List[str]=False ,A : Tuple=4 ,A : int=4 ,A : str=4 ,A : Optional[int]=False ,A : Optional[Any]=3_00 ,A : Optional[int]=False ,A : Union[str, Any]=1 ,A : Optional[Any]=5 ,A : List[str]=2 ,A : List[str]=1 ,A : Dict=1 ,A : Union[str, Any]=5 ,A : Optional[int]=2 ,A : Optional[int]=0.1 ,A : Tuple=0.25 ,A : int=False ,**A : Optional[int] ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__A = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(A ,A ):
__A = backbone_config.get("model_type" )
__A = CONFIG_MAPPING[backbone_model_type]
__A = config_class.from_dict(A )
__A = use_timm_backbone
__A = backbone_config
__A = num_channels
__A = num_queries
__A = max_position_embeddings
__A = d_model
__A = encoder_ffn_dim
__A = encoder_layers
__A = encoder_attention_heads
__A = decoder_ffn_dim
__A = decoder_layers
__A = decoder_attention_heads
__A = dropout
__A = attention_dropout
__A = activation_dropout
__A = activation_function
__A = init_std
__A = init_xavier_std
__A = encoder_layerdrop
__A = auxiliary_loss
__A = position_embedding_type
__A = backbone
__A = use_pretrained_backbone
__A = dilation
# deformable attributes
__A = num_feature_levels
__A = encoder_n_points
__A = decoder_n_points
__A = two_stage
__A = two_stage_num_proposals
__A = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__A = class_cost
__A = bbox_cost
__A = giou_cost
# Loss coefficients
__A = mask_loss_coefficient
__A = dice_loss_coefficient
__A = bbox_loss_coefficient
__A = giou_loss_coefficient
__A = eos_coefficient
__A = focal_alpha
__A = disable_custom_kernels
super().__init__(is_encoder_decoder=A ,**A )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return self.d_model
def UpperCamelCase_ ( self : List[str] ):
__A = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A = self.backbone_config.to_dict()
__A = self.__class__.model_type
return output
| 55
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCamelCase__ ( _A ):
'''simple docstring'''
def is_valid_tree(_A ) -> bool:
if node is None:
return True
if not isinstance(_A , _A ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_A ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
def is_binary_search_tree_recursive_check(
_A , _A , _A ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _A , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _A )
)
return is_binary_search_tree_recursive_check(_A , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376
| 0
|
"""simple docstring"""
from PIL import Image
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Dict = image.size
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Optional[int] = image.load()
for i in range(lowercase_ ):
for j in range(lowercase_ ):
_UpperCamelCase : List[str] = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowercase_ ):
for i in range(lowercase_ ):
_UpperCamelCase : Tuple = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
lowerCamelCase__ = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 714
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_A: Dict = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
_A: int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 126
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase ( UpperCAmelCase_ ):
_A : str = ["""image_processor""", """tokenizer"""]
_A : str = """OwlViTImageProcessor"""
_A : List[str] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __A=None , __A=None , **__A ):
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
__UpperCAmelCase = kwargs.pop('feature_extractor' )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , __A="max_length" , __A="np" , **__A ):
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
__UpperCAmelCase = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
__UpperCAmelCase = []
# Maximum number of queries across batch
__UpperCAmelCase = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
__UpperCAmelCase = t + [' '] * (max_num_queries - len(__A ))
__UpperCAmelCase = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
encodings.append(__A )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCAmelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCAmelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCAmelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCAmelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCAmelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCAmelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCAmelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCAmelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCAmelCase = BatchEncoding()
__UpperCAmelCase = input_ids
__UpperCAmelCase = attention_mask
if query_images is not None:
__UpperCAmelCase = BatchEncoding()
__UpperCAmelCase = self.image_processor(
__A , return_tensors=__A , **__A ).pixel_values
__UpperCAmelCase = query_pixel_values
if images is not None:
__UpperCAmelCase = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
__UpperCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.image_processor.post_process(*__A , **__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.image_processor.post_process_object_detection(*__A , **__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.image_processor.post_process_image_guided_detection(*__A , **__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.tokenizer.batch_decode(*__A , **__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.tokenizer.decode(*__A , **__A )
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 126
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
__UpperCAmelCase : Dict = TypeVar("_T")
class __snake_case ( Generic[_T] ):
'''simple docstring'''
def __init__( self : Union[str, Any] , A : Iterable[_T] | None = None ):
__snake_case: list[_T] = list(iterable or [] )
__snake_case: list[_T] = []
def __len__( self : Union[str, Any] ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Any ):
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def UpperCAmelCase__ ( self : List[Any] , A : _T ):
self._stacka.append(A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Any = self._stacka.pop
__snake_case: Tuple = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 155
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
# load base model
__snake_case: str = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa)
# load LoRA weight from .safetensors
__snake_case: Dict = load_file(SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__snake_case: Optional[int] = key.split(""".""")[0].split(LORA_PREFIX_TEXT_ENCODER + """_""")[-1].split("""_""")
__snake_case: Union[str, Any] = pipeline.text_encoder
else:
__snake_case: Optional[int] = key.split(""".""")[0].split(LORA_PREFIX_UNET + """_""")[-1].split("""_""")
__snake_case: List[Any] = pipeline.unet
# find the target layer
__snake_case: Optional[Any] = layer_infos.pop(0)
while len(SCREAMING_SNAKE_CASE__) > -1:
try:
__snake_case: Optional[Any] = curr_layer.__getattr__(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > 0:
__snake_case: Optional[int] = layer_infos.pop(0)
elif len(SCREAMING_SNAKE_CASE__) == 0:
break
except Exception:
if len(SCREAMING_SNAKE_CASE__) > 0:
temp_name += "_" + layer_infos.pop(0)
else:
__snake_case: Tuple = layer_infos.pop(0)
__snake_case: Any = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up"""))
pair_keys.append(SCREAMING_SNAKE_CASE__)
else:
pair_keys.append(SCREAMING_SNAKE_CASE__)
pair_keys.append(key.replace("""lora_up""" , """lora_down"""))
# update weight
if len(state_dict[pair_keys[0]].shape) == 4:
__snake_case: Union[str, Any] = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa)
__snake_case: Dict = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).unsqueeze(2).unsqueeze(3)
else:
__snake_case: List[Any] = state_dict[pair_keys[0]].to(torch.floataa)
__snake_case: Dict = state_dict[pair_keys[1]].to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# update visited list
for item in pair_keys:
visited.append(SCREAMING_SNAKE_CASE__)
return pipeline
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
__UpperCAmelCase : str = parser.parse_args()
__UpperCAmelCase : Union[str, Any] = args.base_model_path
__UpperCAmelCase : str = args.checkpoint_path
__UpperCAmelCase : List[str] = args.dump_path
__UpperCAmelCase : Optional[int] = args.lora_prefix_unet
__UpperCAmelCase : Optional[int] = args.lora_prefix_text_encoder
__UpperCAmelCase : int = args.alpha
__UpperCAmelCase : List[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__UpperCAmelCase : Union[str, Any] = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 155
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case (_a ):
def __init__( self : List[Any] , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : CLIPTextModel , _UpperCAmelCase : CLIPTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase : StableDiffusionSafetyChecker , _UpperCAmelCase : CLIPImageProcessor , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
'''simple docstring'''
self.enable_attention_slicing(_UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[int] , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[torch.FloatTensor] = None , **_UpperCAmelCase : Dict , ) -> str:
'''simple docstring'''
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase : int = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase : List[Any] = len(_UpperCAmelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(_UpperCAmelCase )}." )
# get prompt text embeddings
_lowerCAmelCase : Any = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_lowerCAmelCase : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
_lowerCAmelCase : Any = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowerCAmelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = text_embeddings.shape
_lowerCAmelCase : int = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
_lowerCAmelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : List[str]
if negative_prompt is None:
_lowerCAmelCase : Optional[Any] = [""""""]
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !="
f" {type(_UpperCAmelCase )}." )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase : str = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
""" the batch size of `prompt`.""" )
else:
_lowerCAmelCase : Optional[int] = negative_prompt
_lowerCAmelCase : Optional[int] = text_input_ids.shape[-1]
_lowerCAmelCase : str = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" , )
_lowerCAmelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase : Union[str, Any] = uncond_embeddings.shape[1]
_lowerCAmelCase : Dict = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
_lowerCAmelCase : str = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_lowerCAmelCase : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase : str = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(self.device )
_lowerCAmelCase : str = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase : Optional[int] = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
_lowerCAmelCase : str = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
_lowerCAmelCase : Tuple = latents_reference.to(self.device )
_lowerCAmelCase : Optional[Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowerCAmelCase : Any = (latents_shape[3] - latents_shape_reference[3]) // 2
_lowerCAmelCase : int = (latents_shape[2] - latents_shape_reference[2]) // 2
_lowerCAmelCase : Dict = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowerCAmelCase : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowerCAmelCase : Dict = 0 if dx < 0 else dx
_lowerCAmelCase : Any = 0 if dy < 0 else dy
_lowerCAmelCase : Optional[Any] = max(-dx , 0 )
_lowerCAmelCase : Union[str, Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
_lowerCAmelCase : Tuple = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Dict = {}
if accepts_eta:
_lowerCAmelCase : Dict = eta
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase : int = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
_lowerCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Optional[int] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase : List[str] = 1 / 0.18215 * latents
_lowerCAmelCase : str = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_lowerCAmelCase : Any = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors="""pt""" ).to(
self.device )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.safety_checker(
images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_lowerCAmelCase : Optional[int] = None
if output_type == "pil":
_lowerCAmelCase : Optional[int] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
| 429
|
def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : float , UpperCamelCase_ : float ):
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def _UpperCAmelCase (UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ):
'''simple docstring'''
return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def _UpperCAmelCase (UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ):
'''simple docstring'''
return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def _UpperCAmelCase (UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ):
'''simple docstring'''
return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 429
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1024 , _lowerCAmelCase=1024 , _lowerCAmelCase=False , **_lowerCAmelCase ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
UpperCamelCase : int = SeqaSeqDataset(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , type_path="train" , **_lowerCAmelCase )
UpperCamelCase : List[Any] = tok.pad_token_id
def get_lens(_lowerCAmelCase ):
UpperCamelCase : Dict = tqdm(
DataLoader(_lowerCAmelCase , batch_size=512 , num_workers=8 , shuffle=_lowerCAmelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCamelCase : Optional[int] = []
for batch in dl:
UpperCamelCase : Dict = batch["input_ids"].ne(_lowerCAmelCase ).sum(1 ).tolist()
UpperCamelCase : List[Any] = batch["labels"].ne(_lowerCAmelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_lowerCAmelCase , _lowerCAmelCase ):
max_lens.append(max(_lowerCAmelCase , _lowerCAmelCase ) )
else:
max_lens.extend(_lowerCAmelCase )
return max_lens
UpperCamelCase : Optional[int] = get_lens(_lowerCAmelCase )
UpperCamelCase : str = SeqaSeqDataset(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , type_path="val" , **_lowerCAmelCase )
UpperCamelCase : Any = get_lens(_lowerCAmelCase )
pickle_save(_lowerCAmelCase , train_ds.len_file )
pickle_save(_lowerCAmelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = KandinskyVaaPipeline
_UpperCAmelCase :str = [
'image_embeds',
'negative_image_embeds',
]
_UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
_UpperCAmelCase :List[str] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :List[str] = False
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 100
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''simple docstring'''
UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
if str(A_ ).startswith("mps" ):
UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
else:
UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = "cpu"
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : Tuple = self.pipeline_class(**A_ )
UpperCamelCase : List[str] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : Optional[int] = output.images
UpperCamelCase : int = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : int = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase : Tuple = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : str = "red cat, 4k photo"
UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : Tuple = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase : Tuple = pipeline(
image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
| 38
| 0
|
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCamelCase = 101 ) -> Any:
"""simple docstring"""
__magic_name__ : List[str] = length
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return self.length
def __getitem__( self , lowerCamelCase ) -> int:
"""simple docstring"""
return i
class A__ :
def __call__( self , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
return {"input_ids": torch.tensor(lowerCamelCase ), "labels": torch.tensor(lowerCamelCase )}
class A__ ( nn.Module ):
def __init__( self ) -> Tuple:
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__magic_name__ : Tuple = nn.Linear(120 , 80 )
def lowercase ( self , lowerCamelCase , lowerCamelCase=None ) -> List[str]:
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class A__ ( __SCREAMING_SNAKE_CASE ):
@require_torch_neuroncore
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = F'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
__magic_name__ : Dict = self.get_auto_remove_tmp_dir()
__magic_name__ : str = F'''--output_dir {output_dir}'''.split()
__magic_name__ : Optional[Any] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class A__ ( __SCREAMING_SNAKE_CASE ):
@require_torch_multi_gpu
def lowercase ( self ) -> str:
"""simple docstring"""
__magic_name__ : Optional[Any] = F'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
__magic_name__ : str = self.get_auto_remove_tmp_dir()
__magic_name__ : List[str] = F'''--output_dir {output_dir}'''.split()
__magic_name__ : Tuple = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowercase_ = HfArgumentParser((TrainingArguments,))
lowercase_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
lowercase_ = DummyDataset(dataset_length)
def lowerCAmelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
__magic_name__ : str = list(range(len(UpperCAmelCase ) ) )
__magic_name__ : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
lowercase_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowercase_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowercase_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowercase_ = 2
lowercase_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowercase_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowercase_ = None
| 154
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
_lowerCAmelCase = logging.getLogger(__name__)
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = git.Repo(search_parent_directories=snake_case__ )
A_ : List[str] = {
"""repo_id""": str(snake_case__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(snake_case__ , """git_log.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ , indent=4 )
def __UpperCamelCase ( snake_case__ ):
if params.n_gpu <= 0:
A_ : Dict = 0
A_ : str = -1
A_ : int = True
A_ : Union[str, Any] = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
A_ : str = int(os.environ["""WORLD_SIZE"""] )
A_ : int = int(os.environ["""N_GPU_NODE"""] )
A_ : int = int(os.environ["""RANK"""] )
# number of nodes / node ID
A_ : Optional[int] = params.world_size // params.n_gpu_per_node
A_ : Optional[int] = params.global_rank // params.n_gpu_per_node
A_ : Tuple = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
A_ : Dict = 1
A_ : Tuple = 0
A_ : Dict = 0
A_ : Optional[int] = 0
A_ : List[str] = 1
A_ : List[Any] = 1
A_ : List[Any] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
A_ : Optional[int] = params.node_id == 0 and params.local_rank == 0
A_ : Optional[Any] = params.n_nodes > 1
# summary
A_ : str = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def __UpperCamelCase ( snake_case__ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 480
| 0
|
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None) -> Tuple:
a__ =data
a__ =previous
a__ =next_node
def __str__( self) -> str:
return F"""{self.data}"""
def __UpperCamelCase ( self) -> int:
return self.data
def __UpperCamelCase ( self) -> Any:
return self.next
def __UpperCamelCase ( self) -> int:
return self.previous
class lowercase_ :
def __init__( self , lowercase_) -> str:
a__ =head
def __iter__( self) -> Union[str, Any]:
return self
def __UpperCamelCase ( self) -> str:
if not self.current:
raise StopIteration
else:
a__ =self.current.get_data()
a__ =self.current.get_next()
return value
class lowercase_ :
def __init__( self) -> Union[str, Any]:
a__ =None # First node in list
a__ =None # Last node in list
def __str__( self) -> Dict:
a__ =self.head
a__ =[]
while current is not None:
nodes.append(current.get_data())
a__ =current.get_next()
return " ".join(str(lowercase_) for node in nodes)
def __contains__( self , lowercase_) -> Any:
a__ =self.head
while current:
if current.get_data() == value:
return True
a__ =current.get_next()
return False
def __iter__( self) -> Any:
return LinkedListIterator(self.head)
def __UpperCamelCase ( self) -> List[str]:
if self.head:
return self.head.get_data()
return None
def __UpperCamelCase ( self) -> Optional[int]:
if self.tail:
return self.tail.get_data()
return None
def __UpperCamelCase ( self , lowercase_) -> None:
if self.head is None:
a__ =node
a__ =node
else:
self.insert_before_node(self.head , lowercase_)
def __UpperCamelCase ( self , lowercase_) -> None:
if self.head is None:
self.set_head(lowercase_)
else:
self.insert_after_node(self.tail , lowercase_)
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =Node(lowercase_)
if self.head is None:
self.set_head(lowercase_)
else:
self.set_tail(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =node
a__ =node.previous
if node.get_previous() is None:
a__ =node_to_insert
else:
a__ =node_to_insert
a__ =node_to_insert
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =node
a__ =node.next
if node.get_next() is None:
a__ =node_to_insert
else:
a__ =node_to_insert
a__ =node_to_insert
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =1
a__ =Node(lowercase_)
a__ =self.head
while node:
if current_position == position:
self.insert_before_node(lowercase_ , lowercase_)
return
current_position += 1
a__ =node.next
self.insert_after_node(self.tail , lowercase_)
def __UpperCamelCase ( self , lowercase_) -> Node:
a__ =self.head
while node:
if node.get_data() == item:
return node
a__ =node.get_next()
raise Exception('Node not found')
def __UpperCamelCase ( self , lowercase_) -> Dict:
if (node := self.get_node(lowercase_)) is not None:
if node == self.head:
a__ =self.head.get_next()
if node == self.tail:
a__ =self.tail.get_previous()
self.remove_node_pointers(lowercase_)
@staticmethod
def __UpperCamelCase ( lowercase_) -> None:
if node.get_next():
a__ =node.previous
if node.get_previous():
a__ =node.next
a__ =None
a__ =None
def __UpperCamelCase ( self) -> Any:
return self.head is None
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
| 665
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
a__ : List[Any] = random.Random()
def UpperCAmelCase_( a__ , a__=1.0 , a__=None , a__=None ):
"""simple docstring"""
if rng is None:
SCREAMING_SNAKE_CASE : List[str] = global_rng
SCREAMING_SNAKE_CASE : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=400 , _lowerCamelCase=2000 , _lowerCamelCase=24 , _lowerCamelCase=24 , _lowerCamelCase=0.0 , _lowerCamelCase=1_6000 , _lowerCamelCase=True , _lowerCamelCase=True , ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = min_seq_length
SCREAMING_SNAKE_CASE : str = max_seq_length
SCREAMING_SNAKE_CASE : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = feature_size
SCREAMING_SNAKE_CASE : Dict = num_mel_bins
SCREAMING_SNAKE_CASE : Optional[Any] = padding_value
SCREAMING_SNAKE_CASE : Dict = sampling_rate
SCREAMING_SNAKE_CASE : int = return_attention_mask
SCREAMING_SNAKE_CASE : List[Any] = do_normalize
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self , _lowerCamelCase=False , _lowerCamelCase=False ) ->int:
def _flatten(_lowerCamelCase ):
return list(itertools.chain(*_lowerCamelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Dict = [np.asarray(_lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechaTextFeatureExtractor if is_speech_available() else None
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : int = SpeechaTextFeatureExtractionTester(self )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.assertTrue(np.all(np.mean(_lowerCamelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCamelCase , axis=0 ) - 1 ) < 1e-3 ) )
def __lowerCAmelCase ( self ) ->int:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Any = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : Any = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Tuple = np.asarray(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : List[str] = [None, 16, None]
for max_length, padding in zip(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(
_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , return_attention_mask=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = inputs.input_features
SCREAMING_SNAKE_CASE : int = inputs.attention_mask
SCREAMING_SNAKE_CASE : Dict = [np.sum(_lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : str = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : int = [None, 16, None]
for max_length, padding in zip(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(
_lowerCamelCase , max_length=_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = inputs.input_features
SCREAMING_SNAKE_CASE : str = inputs.attention_mask
SCREAMING_SNAKE_CASE : int = [np.sum(_lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(
_lowerCamelCase , padding='''max_length''' , max_length=4 , truncation=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[int] = inputs.input_features
SCREAMING_SNAKE_CASE : int = inputs.attention_mask
SCREAMING_SNAKE_CASE : Tuple = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Dict = feature_extractor(
_lowerCamelCase , padding='''longest''' , max_length=4 , truncation=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[int] = inputs.input_features
SCREAMING_SNAKE_CASE : Optional[int] = inputs.attention_mask
SCREAMING_SNAKE_CASE : Optional[int] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
_lowerCamelCase , padding='''longest''' , max_length=16 , truncation=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[Any] = inputs.input_features
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.attention_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __lowerCAmelCase ( self ) ->Tuple:
import torch
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Any = np.random.rand(100 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE : Dict = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
from datasets import load_dataset
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : str = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowerCAmelCase ( self ) ->int:
# fmt: off
SCREAMING_SNAKE_CASE : Any = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
# fmt: on
SCREAMING_SNAKE_CASE : Any = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(_lowerCamelCase , return_tensors='''pt''' ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _lowerCamelCase , atol=1e-4 ) )
| 333
|
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return 10 - x * x
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if equation(a__ ) * equation(a__ ) >= 0:
raise ValueError('''Wrong space!''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = a
while (b - a) >= 0.01:
# Find middle point
SCREAMING_SNAKE_CASE : Optional[Any] = (a + b) / 2
# Check if middle point is root
if equation(a__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(a__ ) * equation(a__ ) < 0:
SCREAMING_SNAKE_CASE : Optional[int] = c
else:
SCREAMING_SNAKE_CASE : int = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 333
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.